[llvm] 51b9e33 - [6/8][RISCV] Add rounding mode control variant for vfsqrt, vfrec7
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 13 00:52:01 PDT 2023
Author: eopXD
Date: 2023-07-13T00:51:51-07:00
New Revision: 51b9e336619b09305357e5ec43f64890d4965e20
URL: https://github.com/llvm/llvm-project/commit/51b9e336619b09305357e5ec43f64890d4965e20
DIFF: https://github.com/llvm/llvm-project/commit/51b9e336619b09305357e5ec43f64890d4965e20.diff
LOG: [6/8][RISCV] Add rounding mode control variant for vfsqrt, vfrec7
Depends on D154633
For the cover letter of the patch-set, please check out D154628.
This is the 6th patch of the patch-set.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D154634
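
As a quick orientation before the file lists and diff: the new _rm intrinsic
variants take an extra rounding-mode argument (a constant in the range 0..4,
e.g. __RISCV_FRM_RNE) between the source operand and vl, while the existing
variants keep using the dynamic rounding mode from the frm CSR. A minimal
usage sketch, using the f32m1 vfrec7 spellings that appear in the updated
tests below (the vfsqrt spellings are assumed to follow the same pattern):

  #include <riscv_vector.h>

  // Unmasked: the rounding mode is an explicit compile-time constant.
  vfloat32m1_t rec7_rne(vfloat32m1_t op1, size_t vl) {
    return __riscv_vfrec7_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
  }

  // Masked: the rounding-mode argument sits between op1 and vl.
  vfloat32m1_t rec7_rne_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
    return __riscv_vfrec7_v_f32m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
  }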
Added:
clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfrec7-out-of-range.c
clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfsqrt-out-of-range.c
Modified:
clang/include/clang/Basic/riscv_vector.td
clang/lib/Sema/SemaChecking.cpp
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
Removed:
################################################################################
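
Most of the churn in the diff below is mechanical: the llvm.riscv.vfsqrt and
llvm.riscv.vfrec7 intrinsics gain an frm operand, and the autogenerated tests
are regenerated so that existing calls pass 7 (dynamic, i.e. keep reading the
frm CSR), with the masked form ordered as (passthru, op0, mask, frm, vl,
policy). A small sketch of what that means for an unchanged masked call,
using the f32m1 case shown verbatim in the tests:

  #include <riscv_vector.h>

  vfloat32m1_t rec7_dyn_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
    // Before this patch the call lowered to
    //   @llvm.riscv.vfrec7.mask.nxv2f32.i64(poison, op1, mask, vl, 3)
    // and it now lowers to
    //   @llvm.riscv.vfrec7.mask.nxv2f32.i64(poison, op1, mask, i64 7, vl, 3)
    // i.e. frm = 7 (DYN) is inserted before vl, so behaviour is unchanged.
    return __riscv_vfrec7_v_f32m1_m(mask, op1, vl);
  }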
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 42882bd9da373e..16ab8c57daff1b 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2163,16 +2163,67 @@ let ManualCodegen = [{
}
-// 14.8. Vector Floating-Point Square-Root Instruction
let UnMaskedPolicyScheme = HasPassthruOperand in {
-def vfsqrt : RVVFloatingUnaryVVBuiltin;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, round_mode, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
+ (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 1]); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 1]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.8. Vector Floating-Point Square-Root Instruction
+ defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vvu"]]>;
+
+ // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vvu"]]>;
+ }
+ // 14.8. Vector Floating-Point Square-Root Instruction
+ defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vv"]]>;
+
+ // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vv"]]>;
+}
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
-// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
-def vfrec7 : RVVFloatingUnaryVVBuiltin;
-
// 14.11. Vector Floating-Point MIN/MAX Instructions
defm vfmin : RVVFloatingBinBuiltinSet;
defm vfmax : RVVFloatingBinBuiltinSet;
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 774b38b677997a..5781b0a78ac5c6 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4808,6 +4808,9 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
@@ -4828,6 +4831,10 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tama:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tama:
return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
@@ -4917,6 +4924,12 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tama:
case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tama:
case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tama:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tama:
case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tama:
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c
index fc61ba21511c04..3d42f6ab490918 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4
// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2
// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1
// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2
// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1
// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2
// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1
// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2
// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4
// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
@@ -300,10 +300,310 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
return __riscv_vfrec7_v_f64m8_m(mask, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm(vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m1_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm(vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm(vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm(vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m8_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32mf2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm(vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm(vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm(vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m8_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm(vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m1_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm(vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm(vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m8_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c
index e9fedbb14fe464..b166d133f4794d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4
// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2
// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1
// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2
// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1
// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2
// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1
// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2
// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4
// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
@@ -300,10 +300,310 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
return __riscv_vfsqrt_v_f64m8_m(mask, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm(vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m1_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm(vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm(vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm(vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m8_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32mf2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm(vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm(vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm(vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m8_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm(vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m1_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm(vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m2_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm(vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m4_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m8_rm(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
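(Illustrative usage sketch, not part of the diff: as the updated CHECK lines above show, the plain unmasked intrinsics now pass frm = 7, i.e. the dynamic rounding mode read from the frm CSR, while the new *_rm variants take an explicit frm argument between the source operand and vl. A minimal sketch, assuming <riscv_vector.h> provides the __RISCV_FRM_* enumerators exercised by these tests:

  #include <riscv_vector.h>

  // Square root rounded to nearest-even: the explicit frm argument
  // (__RISCV_FRM_RNE == 0) is forwarded as the new i64 rounding-mode
  // operand of @llvm.riscv.vfsqrt.nxv2f32.i64, matching the
  // test_vfsqrt_v_f32m1_rm check above.
  vfloat32m1_t sqrt_rne(vfloat32m1_t op1, size_t vl) {
    return __riscv_vfsqrt_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
  }
)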
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
index 174c056ba4e75a..b89a4cf8099235 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4
// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2
// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1
// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2
// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1
// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2
// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1
// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2
// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4
// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
@@ -300,10 +300,310 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
return __riscv_vfrec7(mask, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm(vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm(vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm(vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm(vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm(vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm(vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm(vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm(vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm(vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm(vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
index 3f285bfddd49e9..38f71d156b744d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsqrt.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4
// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2
// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1
// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2
// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4
// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8
// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2
// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1
// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2
// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4
// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8
// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1
// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2
// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4
// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8
// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
@@ -300,10 +300,310 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
return __riscv_vfsqrt(mask, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm(vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm(vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm(vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm(vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm(vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm(vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm(vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm(vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm(vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm(vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c
index b829d7974fcfc4..f1c6fcb3dd2c45 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -380,7 +380,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -390,7 +390,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -410,7 +410,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -420,7 +420,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -430,7 +430,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -440,7 +440,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -450,7 +450,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -460,7 +460,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -470,7 +470,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -480,7 +480,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -490,7 +490,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -500,7 +500,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -510,7 +510,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -520,7 +520,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -540,7 +540,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -550,7 +550,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -560,7 +560,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -570,7 +570,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -600,10 +600,610 @@ vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
return __riscv_vfrec7_v_f64m8_mu(mask, maskedoff, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f16m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f32m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_f64m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
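A minimal sketch, assuming <riscv_vector.h> provides these intrinsics and the __RISCV_FRM_* enumeration, contrasting the dynamic-frm and explicit-frm forms exercised by the tests above (illustrative only):

#include <riscv_vector.h>

// Dynamic rounding mode: no frm argument in the C intrinsic; the IR call
// carries frm operand 7 (use the current frm CSR), as in the updated
// masked checks above.
vfloat64m8_t recip_dyn(vbool8_t mask, vfloat64m8_t maskedoff,
                       vfloat64m8_t op1, size_t vl) {
  return __riscv_vfrec7_v_f64m8_mu(mask, maskedoff, op1, vl);
}

// Explicit rounding mode: the new _rm variant takes frm between op1 and vl;
// __RISCV_FRM_RNE lowers to frm operand 0 in the checks above.
vfloat64m8_t recip_rne(vbool8_t mask, vfloat64m8_t maskedoff,
                       vfloat64m8_t op1, size_t vl) {
  return __riscv_vfrec7_v_f64m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
}
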
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c
index 750e99783dbc98..e9fba6434c6251 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -380,7 +380,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -390,7 +390,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -410,7 +410,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -420,7 +420,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -430,7 +430,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -440,7 +440,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -450,7 +450,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -460,7 +460,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -470,7 +470,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -480,7 +480,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -490,7 +490,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -500,7 +500,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -510,7 +510,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -520,7 +520,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -540,7 +540,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -550,7 +550,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -560,7 +560,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -570,7 +570,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -600,10 +600,610 @@ vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
return __riscv_vfsqrt_v_f64m8_mu(mask, maskedoff, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f16m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f32m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_v_f64m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
index 7391c088d989f5..82d761be0158ff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -380,7 +380,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -390,7 +390,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -410,7 +410,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -420,7 +420,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -430,7 +430,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -440,7 +440,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -450,7 +450,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -460,7 +460,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -470,7 +470,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -480,7 +480,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -490,7 +490,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -500,7 +500,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -510,7 +510,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -520,7 +520,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -540,7 +540,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -550,7 +550,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -560,7 +560,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -570,7 +570,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -600,10 +600,610 @@ vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
}
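// NOTE (editorial sketch, not part of the generated diff): the tests that follow
// exercise the new explicit rounding-mode ("_rm") intrinsic variants added by this
// patch. The C-level frm argument is forwarded as the extra i64 operand of the LLVM
// intrinsic: __RISCV_FRM_RNE lowers to the `i64 0` seen in the CHECK lines below,
// while the frm-less forms above now pass `i64 7` (dynamic rounding mode, read from
// the frm CSR at run time). A minimal usage sketch, reusing only intrinsics and
// enumerators that appear in this patch and assuming <riscv_vector.h> from a
// toolchain that carries this change:
//
//   #include <riscv_vector.h>
//
//   vfloat32m1_t recip_estimate_rne(vfloat32m1_t maskedoff, vfloat32m1_t x,
//                                   size_t vl) {
//     // Tail-undisturbed vfrec7 with an explicit static rounding mode (RNE).
//     return __riscv_vfrec7_tu(maskedoff, x, __RISCV_FRM_RNE, vl);
//   }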
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfrec7_v_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfrec7_v_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfrec7_v_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfrec7_v_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfrec7_v_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfrec7_v_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfrec7_v_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfrec7_v_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfrec7_v_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfrec7_v_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfrec7_v_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfrec7_v_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfrec7_v_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfrec7_v_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfrec7_v_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
index 599ad37646b99d..c9698319ca2517 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsqrt.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -90,7 +90,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -100,7 +100,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -110,7 +110,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -120,7 +120,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -130,7 +130,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -180,7 +180,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -190,7 +190,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -200,7 +200,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -210,7 +210,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -220,7 +220,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -240,7 +240,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -260,7 +260,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -270,7 +270,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -380,7 +380,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -390,7 +390,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -400,7 +400,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -410,7 +410,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -420,7 +420,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -430,7 +430,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -440,7 +440,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -450,7 +450,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
@@ -460,7 +460,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
@@ -470,7 +470,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
@@ -480,7 +480,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
@@ -490,7 +490,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
@@ -500,7 +500,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
@@ -510,7 +510,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
@@ -520,7 +520,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -540,7 +540,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -550,7 +550,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -560,7 +560,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -570,7 +570,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -600,10 +600,610 @@ vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl);
}
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfsqrt_v_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfsqrt_v_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfsqrt_v_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfsqrt_v_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfsqrt_v_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfsqrt_v_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfsqrt_v_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfsqrt_v_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfsqrt_v_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfsqrt_v_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfsqrt_v_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfsqrt_v_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfsqrt_v_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfsqrt_v_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfsqrt_v_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+ return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+}
+
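Taken together, the autogenerated tests above pin down the C-level shape of the new rounding-mode variants: the frm argument is inserted between the source operand and vl, and every masked policy suffix (_tum, _tumu, _mu, ...) gains a matching overload. The sketch below is illustrative only; the wrapper names sqrt_rne_tumu and sqrt_rne_mu are made up here, while the intrinsic spellings and __RISCV_FRM_RNE are taken verbatim from the tests above and assume a compiler that already carries this patch.

#include <riscv_vector.h>

// Masked square root with an explicit rounding mode (round-to-nearest-even)
// and the tail-undisturbed / mask-undisturbed policy, mirroring the
// test_vfsqrt_v_*_rm_tumu functions above.
vfloat32m1_t sqrt_rne_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
                           vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
}

// Same operation with the mask-undisturbed-only (_mu) policy, mirroring the
// test_vfsqrt_v_*_rm_mu functions above.
vfloat32m1_t sqrt_rne_mu(vbool32_t mask, vfloat32m1_t maskedoff,
                         vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
}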
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfrec7-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfrec7-out-of-range.c
new file mode 100644
index 00000000000000..a202e7f5286a89
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfrec7-out-of-range.c
@@ -0,0 +1,36 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfrec7_v_f32m1_rm(op1, 5, vl);
+}
+
+vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfrec7_v_f32m1_rm_m(mask, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfrec7_v_f32m1_rm_tu(maskedoff, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfrec7_v_f32m1_rm_tum(mask, maskedoff, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfrec7_v_f32m1_rm_tumu(mask, maskedoff, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfrec7_v_f32m1_rm_mu(mask, maskedoff, op1, 5, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfsqrt-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfsqrt-out-of-range.c
new file mode 100644
index 00000000000000..8fa8749ea7c35e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfsqrt-out-of-range.c
@@ -0,0 +1,36 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfsqrt_v_f32m1_rm(op1, 5, vl);
+}
+
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfsqrt_v_f32m1_rm_m(mask, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfsqrt_v_f32m1_rm_tu(maskedoff, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfsqrt_v_f32m1_rm_tum(mask, maskedoff, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfsqrt_v_f32m1_rm_tumu(mask, maskedoff, op1, 5, vl);
+}
+
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+ return __riscv_vfsqrt_v_f32m1_rm_mu(mask, maskedoff, op1, 5, vl);
+}
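Both handcrafted tests check the same Sema rule: the frm argument of the _rm intrinsics is a compile-time immediate restricted to [0, 4] (the five static rounding modes), so a literal 5 is rejected before codegen. A minimal sketch of well-formed usage follows; the wrapper names rec7_rne and sqrt_rne are invented for illustration, while the intrinsic names and __RISCV_FRM_RNE come straight from the tests above.

#include <riscv_vector.h>

// frm must be a constant in [0, 4]; __RISCV_FRM_RNE (0) is one of the accepted
// values. Passing 5, as the tests above do, triggers the
// "outside the valid range [0, 4]" diagnostic checked there.
vfloat32m1_t rec7_rne(vfloat32m1_t op1, size_t vl) {
  return __riscv_vfrec7_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
}

vfloat32m1_t sqrt_rne(vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
}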
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 0afd3b7c9592a6..4433e6e9289d9f 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -323,6 +323,24 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
+ // For destination vector type is the same as source vector.
+ // Input: (passthru, vector_in, frm, vl)
+ class RISCVUnaryAAUnMaskedRoundingMode
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
+ [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
+ // For destination vector type is the same as first source vector (with mask).
+ // Input: (vector_in, vector_in, mask, frm, vl, policy)
+ class RISCVUnaryAAMaskedRoundingMode
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+ LLVMMatchType<1>, LLVMMatchType<1>],
+ [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
// Input: (passthru, vector_in, vector_in, mask, vl)
class RISCVCompress
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
@@ -1166,6 +1184,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
}
+ multiclass RISCVUnaryAARoundingMode {
+ def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
+ def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
+ }
multiclass RISCVUnaryAB {
def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
@@ -1486,9 +1508,9 @@ let TargetPrefix = "riscv" in {
defm vfwmsac : RISCVTernaryWideRoundingMode;
defm vfwnmsac : RISCVTernaryWideRoundingMode;
- defm vfsqrt : RISCVUnaryAA;
+ defm vfsqrt : RISCVUnaryAARoundingMode;
defm vfrsqrt7 : RISCVUnaryAA;
- defm vfrec7 : RISCVUnaryAA;
+ defm vfrec7 : RISCVUnaryAARoundingMode;
defm vfmin : RISCVBinaryAAX;
defm vfmax : RISCVBinaryAAX;
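The new RISCVUnaryAAUnMaskedRoundingMode / RISCVUnaryAAMaskedRoundingMode classes place the frm operand, an immediate, directly before vl: the unmasked intrinsic becomes (passthru, op0, frm, vl) and the masked one (passthru, op0, mask, frm, vl, policy). A hedged C-level sketch of that mapping is below; the wrapper name sqrt_unmasked_rne is illustrative, the intrinsic spelling is taken from the handcrafted test above, and the operand order noted in the comment follows the class definitions here rather than any single CHECK line.

#include <riscv_vector.h>

// Unmasked vfsqrt with a static rounding mode. Per the intrinsic class above,
// this is expected to lower to
//   @llvm.riscv.vfsqrt.<vt>(<passthru>, <op1>, iXLen 0 /* frm = RNE */, iXLen vl)
// i.e. the frm immediate sits between the source operand and vl.
vfloat32m1_t sqrt_unmasked_rne(vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
}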
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 91296758a5744c..13a777bdce36a0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -961,6 +961,23 @@ class VPseudoUnaryNoMask<DAGOperand RetClass, DAGOperand OpClass,
let HasVecPolicyOp = 1;
}
+class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass, DAGOperand OpClass,
+ string Constraint = ""> :
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew,
+ ixlenimm:$policy), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasVecPolicyOp = 1;
+ let HasRoundModeOp = 1;
+ let UsesVXRM = 0;
+}
+
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
@@ -976,6 +993,24 @@ class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
let UsesMaskPolicy = 1;
}
+class VPseudoUnaryMaskRoundingMode<VReg RetClass, VReg OpClass, string Constraint = ""> :
+ Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+ (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+ VMaskOp:$vm, ixlenimm:$rm,
+ AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
+ let HasRoundModeOp = 1;
+ let UsesVXRM = 0;
+}
+
class VPseudoUnaryMask_NoExcept<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2, VMaskOp:$vm,
@@ -2340,7 +2375,7 @@ multiclass VPseudoVCLS_V {
}
}
-multiclass VPseudoVSQR_V {
+multiclass VPseudoVSQR_V_RM {
foreach m = MxListF in {
defvar mx = m.MX;
defvar sews = SchedSEWSet<m.MX, /*isF*/ 1>.val;
@@ -2351,10 +2386,10 @@ multiclass VPseudoVSQR_V {
defvar WriteVFSqrtV_MX_E = !cast<SchedWrite>("WriteVFSqrtV" # suffix);
defvar ReadVFSqrtV_MX_E = !cast<SchedRead>("ReadVFSqrtV" # suffix);
- def "_V" # suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+ def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
Sched<[WriteVFSqrtV_MX_E, ReadVFSqrtV_MX_E,
ReadVMask]>;
- def "_V" # suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+ def "_V" # suffix # "_MASK" : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
Sched<[WriteVFSqrtV_MX_E, ReadVFSqrtV_MX_E,
ReadVMask]>;
@@ -2378,6 +2413,22 @@ multiclass VPseudoVRCP_V {
}
}
+multiclass VPseudoVRCP_V_RM {
+ foreach m = MxListF in {
+ defvar mx = m.MX;
+ defvar WriteVFRecpV_MX = !cast<SchedWrite>("WriteVFRecpV_" # mx);
+ defvar ReadVFRecpV_MX = !cast<SchedRead>("ReadVFRecpV_" # mx);
+
+ let VLMul = m.value in {
+ def "_V_" # mx : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
+ Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
+ def "_V_" # mx # "_MASK" : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
+ RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
+ Sched<[WriteVFRecpV_MX, ReadVFRecpV_MX, ReadVMask]>;
+ }
+ }
+}
+
multiclass PseudoVEXT_VF2 {
defvar constraints = "@earlyclobber $rd";
foreach m = MxListVF2 in {
@@ -3972,6 +4023,31 @@ class VPatUnaryNoMask<string intrinsic_name,
(op2_type op2_reg_class:$rs2),
GPR:$vl, log2sew, TU_MU)>;
+class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op2_type,
+ int log2sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op2_reg_class,
+ bit isSEWAware = 0> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type result_reg_class:$merge),
+ (op2_type op2_reg_class:$rs2),
+ (XLenVT timm:$round),
+ VLOpFrag)),
+ (!cast<Instruction>(
+ !if(isSEWAware,
+ inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
+ inst#"_"#kind#"_"#vlmul.MX))
+ (result_type result_reg_class:$merge),
+ (op2_type op2_reg_class:$rs2),
+ (XLenVT timm:$round),
+ GPR:$vl, log2sew, TU_MU)>;
+
+
class VPatUnaryMask<string intrinsic_name,
string inst,
string kind,
@@ -3996,6 +4072,34 @@ class VPatUnaryMask<string intrinsic_name,
(op2_type op2_reg_class:$rs2),
(mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+class VPatUnaryMaskRoundingMode<string intrinsic_name,
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op2_type,
+ ValueType mask_type,
+ int log2sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op2_reg_class,
+ bit isSEWAware = 0> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+ (result_type result_reg_class:$merge),
+ (op2_type op2_reg_class:$rs2),
+ (mask_type V0),
+ (XLenVT timm:$round),
+ VLOpFrag, (XLenVT timm:$policy))),
+ (!cast<Instruction>(
+ !if(isSEWAware,
+ inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
+ inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
+ (result_type result_reg_class:$merge),
+ (op2_type op2_reg_class:$rs2),
+ (mask_type V0),
+ (XLenVT timm:$round),
+ GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+
+
class VPatMaskUnaryNoMask<string intrinsic_name,
string inst,
MTypeInfo mti> :
@@ -4611,6 +4715,20 @@ multiclass VPatUnaryV_V<string intrinsic, string instruction,
}
}
+multiclass VPatUnaryV_V_RM<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist, bit isSEWAware = 0> {
+ foreach vti = vtilist in {
+ let Predicates = GetVTypePredicates<vti>.Predicates in {
+ def : VPatUnaryNoMaskRoundingMode<intrinsic, instruction, "V",
+ vti.Vector, vti.Vector, vti.Log2SEW,
+ vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
+ def : VPatUnaryMaskRoundingMode<intrinsic, instruction, "V",
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
+ vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
+ }
+ }
+}
+
multiclass VPatNullaryV<string intrinsic, string instruction> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
@@ -6236,8 +6354,8 @@ defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM;
//===----------------------------------------------------------------------===//
// 13.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
-let Uses = [FRM], mayRaiseFPException = true in
-defm PseudoVFSQRT : VPseudoVSQR_V;
+let mayRaiseFPException = true, hasSideEffects = 0 in
+defm PseudoVFSQRT : VPseudoVSQR_V_RM;
//===----------------------------------------------------------------------===//
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -6248,8 +6366,8 @@ defm PseudoVFRSQRT7 : VPseudoVRCP_V;
//===----------------------------------------------------------------------===//
// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
-let Uses = [FRM], mayRaiseFPException = true in
-defm PseudoVFREC7 : VPseudoVRCP_V;
+let mayRaiseFPException = true, hasSideEffects = 0 in
+defm PseudoVFREC7 : VPseudoVRCP_V_RM;
//===----------------------------------------------------------------------===//
// 13.11. Vector Floating-Point Min/Max Instructions
@@ -6898,7 +7016,7 @@ defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC",
//===----------------------------------------------------------------------===//
// 13.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
-defm : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, /*isSEWAware*/ 1>;
+defm : VPatUnaryV_V_RM<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, /*isSEWAware*/ 1>;
//===----------------------------------------------------------------------===//
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -6908,7 +7026,7 @@ defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
-defm : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
+defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 13.11. Vector Floating-Point Min/Max Instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 874c1f42a2e19f..4abf9ad5159660 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1266,7 +1266,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
+ vti.RegClass:$rs2,
+ // Value to indicate no rounding mode change in
+ // RISCVInsertReadWriteCSR
+ FRM_DYN,
+ vti.AVL, vti.Log2SEW, TU_MU)>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 00ec04613c02a9..76232dd2299591 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2192,7 +2192,11 @@ foreach vti = AllFloatVectors in {
VLOpFrag),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask V0),
+ // Value to indicate no rounding mode change in
+ // RISCVInsertReadWriteCSR
+ FRM_DYN,
+ GPR:$vl, vti.Log2SEW, TA_MA)>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
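In both pattern files the sqrt patterns selected from generic fsqrt nodes pass FRM_DYN (encoding 7) for the new operand, which RISCVInsertReadWriteCSR treats as "leave the rounding-mode CSR alone", so no fsrmi/fsrm pair is inserted; a static value in [0, 4] instead makes the pass swap FRM around the instruction, as the vfrec7.ll updates below show. A hedged C-level contrast follows: the wrapper names are invented, __riscv_vfsqrt_v_f32m1 is the pre-existing non-_rm spelling assumed from the standard intrinsic set, and the "clang passes 7" behaviour is inferred from the updated .ll tests rather than quoted from them.

#include <riscv_vector.h>

// No explicit rounding mode: clang is expected to pass frm = 7 (FRM_DYN), so
// codegen keeps whatever is currently in the frm CSR and emits no fsrmi/fsrm
// around vfsqrt.v.
vfloat32m1_t sqrt_dynamic(vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1(op1, vl);
}

// Explicit rounding mode: frm = 0 (RNE) is passed, and codegen brackets the
// instruction with "fsrmi a0, 0" / "fsrm a0", as the vfrec7.ll CHECK lines
// below illustrate for the analogous vfrec7 case.
vfloat32m1_t sqrt_static_rne(vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl);
}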
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index ff22275e25ba0a..bc8976d99e478f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -811,8 +811,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
@@ -825,7 +824,7 @@ entry:
<vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x i1> %1,
- iXLen %2, iXLen 3)
+ iXLen 7, iXLen %2, iXLen 3)
ret <vscale x 1 x half> %a
}
@@ -857,8 +856,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
@@ -871,7 +869,7 @@ entry:
<vscale x 1 x half> undef,
<vscale x 1 x half> %0,
<vscale x 1 x i1> %1,
- iXLen %2, iXLen 3)
+ iXLen 7, iXLen %2, iXLen 3)
ret <vscale x 1 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
index 4f02e2739810b9..9d286c20a97fa5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -769,8 +769,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen)
+ iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
@@ -782,7 +781,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 7, iXLen %3, iXLen 1)
ret <vscale x 1 x half> %a
}
@@ -813,8 +812,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen)
+ iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
@@ -826,7 +824,7 @@ entry:
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 7, iXLen %3, iXLen 1)
ret <vscale x 1 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
index 198c5583976122..6f73c9ff355c32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -769,8 +769,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen)
+ iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
@@ -782,7 +781,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 2)
+ iXLen 7, iXLen %3, iXLen 2)
ret <vscale x 1 x half> %a
}
@@ -813,8 +812,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen)
+ iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
@@ -826,7 +824,7 @@ entry:
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %0,
- iXLen %3, iXLen 2)
+ iXLen 7, iXLen %3, iXLen 2)
ret <vscale x 1 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
index edfefb1658c63c..5bafcee19f9f61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -769,8 +769,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen)
+ iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
@@ -782,7 +781,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 0)
+ iXLen 7, iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@@ -813,8 +812,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen)
+ iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
@@ -826,7 +824,7 @@ entry:
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %0,
- iXLen %3, iXLen 0)
+ iXLen 7, iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
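The four masked-policy test files differ only in the trailing policy immediate of the masked intrinsic call: tail agnostic contributes bit 0 and mask agnostic bit 1, so masked-tama.ll passes 3, masked-tuma.ll 2, masked-tamu.ll 1, and masked-tumu.ll 0, while the frm operand added by this patch is 7 (dynamic) in every case because no _rm variant is involved. The sketch below maps those files back to C-level spellings; the wrapper names are invented, the non-_rm intrinsic names are assumed from the standard intrinsic set (only their _rm counterparts appear verbatim in this patch), and the policy-bit encoding is stated here as an assumption rather than quoted from the diff.

#include <riscv_vector.h>

// Policy suffix -> trailing policy immediate seen in the .ll tests above
// (frm is expected to be 7, i.e. dynamic, in all of them):
//   _m    -> 3 (tail agnostic, mask agnostic)      cf. masked-tama.ll
//   _tum  -> 2 (tail undisturbed, mask agnostic)   cf. masked-tuma.ll
//   _mu   -> 1 (tail agnostic, mask undisturbed)   cf. masked-tamu.ll
//   _tumu -> 0 (both undisturbed)                  cf. masked-tumu.ll
vfloat32m1_t sqrt_tama(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_m(mask, op1, vl);
}
vfloat32m1_t sqrt_tuma(vbool32_t mask, vfloat32m1_t maskedoff,
                       vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_tum(mask, maskedoff, op1, vl);
}
vfloat32m1_t sqrt_tamu(vbool32_t mask, vfloat32m1_t maskedoff,
                       vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_mu(mask, maskedoff, op1, vl);
}
vfloat32m1_t sqrt_tumu(vbool32_t mask, vfloat32m1_t maskedoff,
                       vfloat32m1_t op1, size_t vl) {
  return __riscv_vfsqrt_v_f32m1_tumu(mask, maskedoff, op1, vl);
}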
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index a3f74ea3058e6d..1ab039e461c2d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -308,7 +308,7 @@ define <vscale x 2 x i32> @vpmerge_vflcass(<vscale x 2 x i32> %passthru, <vscale
}
; Test riscv.vfsqrt
-declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
define <vscale x 2 x float> @vpmerge_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfsqrt:
; CHECK: # %bb.0:
@@ -316,13 +316,13 @@ define <vscale x 2 x float> @vpmerge_vfsqrt(<vscale x 2 x float> %passthru, <vsc
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
; CHECK-NEXT: ret
%1 = zext i32 %vl to i64
- %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
+ %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
%b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
ret <vscale x 2 x float> %b
}
; Test reciprocal operation by riscv.vfrec7
-declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
define <vscale x 2 x float> @vpmerge_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfrec7:
; CHECK: # %bb.0:
@@ -330,7 +330,7 @@ define <vscale x 2 x float> @vpmerge_vfrec7(<vscale x 2 x float> %passthru, <vsc
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
; CHECK-NEXT: ret
%1 = zext i32 %vl to i64
- %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
+ %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
%b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
ret <vscale x 2 x float> %b
}
@@ -728,7 +728,7 @@ define <vscale x 2 x float> @vpselect_vfsqrt(<vscale x 2 x float> %passthru, <vs
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
; CHECK-NEXT: ret
%1 = zext i32 %vl to i64
- %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
+ %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
%b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
ret <vscale x 2 x float> %b
}
@@ -741,7 +741,7 @@ define <vscale x 2 x float> @vpselect_vfrec7(<vscale x 2 x float> %passthru, <vs
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
; CHECK-NEXT: ret
%1 = zext i32 %vl to i64
- %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 %1)
+ %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
%b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
ret <vscale x 2 x float> %b
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index 367ce3ee22b954..446b3c63890ab2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -2192,7 +2192,7 @@ entry:
declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
@@ -2204,7 +2204,7 @@ entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
- iXLen %2)
+ iXLen 7, iXLen %2)
ret <vscale x 1 x half> %a
}
@@ -2232,7 +2232,7 @@ entry:
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
@@ -2244,7 +2244,7 @@ entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
- iXLen %2)
+ iXLen 7, iXLen %2)
ret <vscale x 1 x half> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
index b73a53424c1899..1651b44fe83642 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll
@@ -6,19 +6,21 @@
declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
<vscale x 1 x half> undef,
<vscale x 1 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x half> %a
}
@@ -27,21 +29,22 @@ declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x half> %a
}
@@ -49,19 +52,21 @@ entry:
declare <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
<vscale x 2 x half> undef,
<vscale x 2 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x half> %a
}
@@ -70,21 +75,22 @@ declare <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x half> @intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x half> %a
}
@@ -92,19 +98,21 @@ entry:
declare <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
<vscale x 4 x half> undef,
<vscale x 4 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x half> %a
}
@@ -113,21 +121,22 @@ declare <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x half> @intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x half> %a
}
@@ -135,19 +144,21 @@ entry:
declare <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
<vscale x 8 x half> undef,
<vscale x 8 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x half> %a
}
@@ -156,21 +167,22 @@ declare <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x half> @intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x half> %a
}
@@ -178,19 +190,21 @@ entry:
declare <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
<vscale x 16 x half> undef,
<vscale x 16 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x half> %a
}
@@ -199,21 +213,22 @@ declare <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x half> @intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x half> %a
}
@@ -221,19 +236,21 @@ entry:
declare <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
<vscale x 32 x half> undef,
<vscale x 32 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 32 x half> %a
}
@@ -242,21 +259,22 @@ declare <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x half> @intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16(<vscale x 32 x i1> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
<vscale x 32 x half> %1,
<vscale x 32 x half> %2,
<vscale x 32 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 32 x half> %a
}
@@ -264,19 +282,21 @@ entry:
declare <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
<vscale x 1 x float> undef,
<vscale x 1 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x float> %a
}
@@ -285,21 +305,22 @@ declare <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x float> @intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
@@ -307,19 +328,21 @@ entry:
declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
<vscale x 2 x float> undef,
<vscale x 2 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x float> %a
}
@@ -328,21 +351,22 @@ declare <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x float> @intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x float> %a
}
@@ -350,19 +374,21 @@ entry:
declare <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
<vscale x 4 x float> undef,
<vscale x 4 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x float> %a
}
@@ -371,21 +397,22 @@ declare <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x float> @intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x float> %a
}
@@ -393,19 +420,21 @@ entry:
declare <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
<vscale x 8 x float> undef,
<vscale x 8 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x float> %a
}
@@ -414,21 +443,22 @@ declare <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x float> @intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x float> %a
}
@@ -436,19 +466,21 @@ entry:
declare <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
<vscale x 16 x float> undef,
<vscale x 16 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x float> %a
}
@@ -457,21 +489,22 @@ declare <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x float> @intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32(<vscale x 16 x i1> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
<vscale x 16 x float> %1,
<vscale x 16 x float> %2,
<vscale x 16 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x float> %a
}
@@ -479,19 +512,21 @@ entry:
declare <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
<vscale x 1 x double> undef,
<vscale x 1 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x double> %a
}
@@ -500,21 +535,22 @@ declare <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x double> @intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
<vscale x 1 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x double> %a
}
@@ -522,19 +558,21 @@ entry:
declare <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
<vscale x 2 x double> undef,
<vscale x 2 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x double> %a
}
@@ -543,21 +581,22 @@ declare <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x double> @intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
<vscale x 2 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x double> %a
}
@@ -565,19 +604,21 @@ entry:
declare <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
<vscale x 4 x double> undef,
<vscale x 4 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x double> %a
}
@@ -586,21 +627,22 @@ declare <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x double> @intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
<vscale x 4 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x double> %a
}
@@ -608,19 +650,21 @@ entry:
declare <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
<vscale x 8 x double> undef,
<vscale x 8 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x double> %a
}
@@ -629,21 +673,22 @@ declare <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x double> @intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64(<vscale x 8 x i1> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfrec7.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
<vscale x 8 x double> %1,
<vscale x 8 x double> %2,
<vscale x 8 x i1> %0,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
index 6ba3523cf2bf9f..45583a1b77650a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll
@@ -6,19 +6,21 @@
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
<vscale x 1 x half> undef,
<vscale x 1 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x half> %a
}
@@ -27,21 +29,22 @@ declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x half> %a
}
@@ -49,19 +52,21 @@ entry:
declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
<vscale x 2 x half> undef,
<vscale x 2 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x half> %a
}
@@ -70,21 +75,22 @@ declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x half> %a
}
@@ -92,19 +98,21 @@ entry:
declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
<vscale x 4 x half> undef,
<vscale x 4 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x half> %a
}
@@ -113,21 +121,22 @@ declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x half> %a
}
@@ -135,19 +144,21 @@ entry:
declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
<vscale x 8 x half> undef,
<vscale x 8 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x half> %a
}
@@ -156,21 +167,22 @@ declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x half> %a
}
@@ -178,19 +190,21 @@ entry:
declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
<vscale x 16 x half> undef,
<vscale x 16 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x half> %a
}
@@ -199,21 +213,22 @@ declare <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x half> %a
}
@@ -221,19 +236,21 @@ entry:
declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
<vscale x 32 x half> undef,
<vscale x 32 x half> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 32 x half> %a
}
@@ -242,21 +259,22 @@ declare <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
<vscale x 32 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 32 x half> %a
}
@@ -264,19 +282,21 @@ entry:
declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
<vscale x 1 x float> undef,
<vscale x 1 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x float> %a
}
@@ -285,21 +305,22 @@ declare <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
@@ -307,19 +328,21 @@ entry:
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
<vscale x 2 x float> undef,
<vscale x 2 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x float> %a
}
@@ -328,21 +351,22 @@ declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x float> %a
}
@@ -350,19 +374,21 @@ entry:
declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
<vscale x 4 x float> undef,
<vscale x 4 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x float> %a
}
@@ -371,21 +397,22 @@ declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x float> %a
}
@@ -393,19 +420,21 @@ entry:
declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
<vscale x 8 x float> undef,
<vscale x 8 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x float> %a
}
@@ -414,21 +443,22 @@ declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x float> %a
}
@@ -436,19 +466,21 @@ entry:
declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
<vscale x 16 x float> undef,
<vscale x 16 x float> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x float> %a
}
@@ -457,21 +489,22 @@ declare <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x float> %a
}
@@ -479,19 +512,21 @@ entry:
declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
<vscale x 1 x double> undef,
<vscale x 1 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x double> %a
}
@@ -500,21 +535,22 @@ declare <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x double> %a
}
@@ -522,19 +558,21 @@ entry:
declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
<vscale x 2 x double> undef,
<vscale x 2 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x double> %a
}
@@ -543,21 +581,22 @@ declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x double> %a
}
@@ -565,19 +604,21 @@ entry:
declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
<vscale x 4 x double> undef,
<vscale x 4 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x double> %a
}
@@ -586,21 +627,22 @@ declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x double> %a
}
@@ -608,19 +650,21 @@ entry:
declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v8
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
<vscale x 8 x double> undef,
<vscale x 8 x double> %0,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x double> %a
}
@@ -629,21 +673,22 @@ declare <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfsqrt.v v8, v16, v0.t
+; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x double> %a
}
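
For readers tracing the new operand ordering, the pattern is identical across both test files: every vfsqrt/vfrec7 intrinsic gains an extra iXLen rounding-mode operand placed just before the vl operand (and before the trailing policy operand in the masked forms), and codegen brackets the vector instruction with an fsrmi/fsrm pair whenever a static rounding mode is requested, as the CHECK lines above show. At the C level this is intended to surface as a rounding-mode builtin variant. The snippet below is only a minimal sketch under the assumption that the _rm builtin name and the __RISCV_FRM_* constants follow the RVV intrinsics rounding-mode proposal; neither identifier appears verbatim in this diff, so verify against the riscv_vector.h generated by this patch set.

    // Hypothetical usage sketch (names assumed, see note above).
    #include <riscv_vector.h>

    vfloat32m1_t sqrt_rne(vfloat32m1_t v, size_t vl) {
      // Explicit static rounding mode: round-to-nearest-even (frm = 0),
      // matching the "iXLen 0" operand added throughout the tests above.
      return __riscv_vfsqrt_v_f32m1_rm(v, __RISCV_FRM_RNE, vl);
    }

Passing an explicit frm like this is what triggers the fsrmi/fsrm save-set-restore sequence seen in the generated assembly; the plain (non-_rm) variants keep using the dynamic rounding mode already in frm.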