[clang] c274384 - [NFC][RISCV] Update test cases through update_cc_test_checks.py.

Hsiangkai Wang via cfe-commits <cfe-commits at lists.llvm.org>
Sun Oct 3 00:45:14 PDT 2021


Author: Hsiangkai Wang
Date: 2021-10-03T15:44:06+08:00
New Revision: c274384cff1bbb2f1c840adeccd49e7dd952e119

URL: https://github.com/llvm/llvm-project/commit/c274384cff1bbb2f1c840adeccd49e7dd952e119
DIFF: https://github.com/llvm/llvm-project/commit/c274384cff1bbb2f1c840adeccd49e7dd952e119.diff

LOG: [NFC][RISCV] Update test cases through update_cc_test_checks.py.
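
For reference, the CHECK lines in these tests are normally regenerated by
running the script against a built clang. A sketch of such an invocation
(the --clang path below is illustrative, not taken from this commit):

    llvm/utils/update_cc_test_checks.py \
        --clang /path/to/build/bin/clang \
        clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c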

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
index b6cee3d3b6a8c..ad57ce2188441 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vaadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
index 2a8fb7d6dc073..5c90d3e970b5b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -499,7 +454,6 @@ vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -510,7 +464,6 @@ vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +474,6 @@ vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +484,6 @@ vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -543,7 +494,6 @@ vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -554,7 +504,6 @@ vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -565,7 +514,6 @@ vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -576,7 +524,6 @@ vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -587,7 +534,6 @@ vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -598,7 +544,6 @@ vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -609,7 +554,6 @@ vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -620,7 +564,6 @@ vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -631,7 +574,6 @@ vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -642,7 +584,6 @@ vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -653,7 +594,6 @@ vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -664,7 +604,6 @@ vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -675,7 +614,6 @@ vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -686,7 +624,6 @@ vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -697,7 +634,6 @@ vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -708,7 +644,6 @@ vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -719,7 +654,6 @@ vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -730,7 +664,6 @@ vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -741,7 +674,6 @@ vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -752,7 +684,6 @@ vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -763,7 +694,6 @@ vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +704,6 @@ vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -796,7 +724,6 @@ vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -807,7 +734,6 @@ vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -818,7 +744,6 @@ vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -829,7 +754,6 @@ vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -840,7 +764,6 @@ vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -851,7 +774,6 @@ vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -862,7 +784,6 @@ vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -873,7 +794,6 @@ vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -884,7 +804,6 @@ vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -895,7 +814,6 @@ vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -906,7 +824,6 @@ vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -917,7 +834,6 @@ vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -928,7 +844,6 @@ vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -939,7 +854,6 @@ vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -950,7 +864,6 @@ vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -961,7 +874,6 @@ vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
   return vadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
index e04f0e2391f85..c8ab85a591bd8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -9,6 +9,7 @@
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -17,6 +18,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -25,6 +27,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -33,6 +36,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -41,6 +45,7 @@ vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -49,6 +54,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -57,6 +63,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -65,6 +72,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -73,6 +81,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -81,6 +90,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -89,6 +99,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -97,6 +108,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -105,6 +117,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -113,6 +126,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -121,6 +135,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -129,6 +144,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -137,6 +153,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -145,6 +162,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -153,6 +171,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -161,6 +180,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -169,6 +189,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -177,6 +198,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -185,6 +207,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -193,6 +216,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -201,6 +225,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -209,6 +234,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -217,6 +243,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -225,6 +252,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -233,6 +261,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -241,6 +270,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -249,6 +279,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -257,6 +288,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -265,6 +297,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -273,6 +306,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -281,6 +315,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -289,6 +324,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -297,6 +333,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -305,6 +342,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -313,6 +351,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -321,6 +360,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -329,6 +369,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -337,6 +378,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -345,6 +387,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -353,6 +396,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -361,6 +405,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -369,6 +414,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -377,6 +423,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -385,6 +432,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -393,6 +441,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -401,6 +450,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -409,6 +459,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -417,6 +468,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -425,6 +477,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -433,6 +486,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -441,6 +495,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -449,6 +504,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -457,6 +513,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -465,6 +522,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -473,6 +531,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -481,6 +540,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -489,6 +549,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -497,6 +558,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -505,6 +567,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -513,6 +576,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -521,6 +585,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -529,6 +594,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -537,6 +603,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -545,6 +612,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -553,6 +621,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -561,6 +630,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -569,6 +639,7 @@ vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -577,6 +648,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -585,6 +657,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -593,6 +666,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -601,6 +675,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -609,6 +684,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -617,6 +693,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -625,6 +702,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -633,6 +711,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -641,6 +720,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -649,6 +729,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -657,6 +738,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -665,6 +747,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -673,6 +756,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -681,6 +765,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -689,6 +774,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -697,6 +783,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -705,6 +792,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
@@ -713,6 +801,7 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -721,6 +810,7 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -729,6 +819,7 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -737,6 +828,7 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -745,6 +837,7 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -753,6 +846,7 @@ vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -761,6 +855,7 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -769,6 +864,7 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -777,6 +873,7 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -785,6 +882,7 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -793,6 +891,7 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -801,6 +900,7 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -809,6 +909,7 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -817,6 +918,7 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -825,6 +927,7 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -833,6 +936,7 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -841,6 +945,7 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -849,6 +954,7 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -857,6 +963,7 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -865,6 +972,7 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -873,6 +981,7 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -881,6 +990,7 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -889,6 +999,7 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -897,6 +1008,7 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -905,6 +1017,7 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -913,6 +1026,7 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -921,6 +1035,7 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -929,6 +1044,7 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -937,6 +1053,7 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -945,6 +1062,7 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -953,6 +1071,7 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -961,6 +1080,7 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -969,6 +1089,7 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -977,6 +1098,7 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -985,6 +1107,7 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -993,6 +1116,7 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1001,6 +1125,7 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1009,6 +1134,7 @@ vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1017,6 +1143,7 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1025,6 +1152,7 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1033,6 +1161,7 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1041,6 +1170,7 @@ vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1049,6 +1179,7 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1057,6 +1188,7 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1065,6 +1197,7 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1073,6 +1206,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
 vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1081,6 +1215,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1089,6 +1224,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
 vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1097,6 +1233,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1105,6 +1242,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
 vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1113,6 +1251,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1121,6 +1260,7 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
 vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1129,6 +1269,7 @@ vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1137,6 +1278,7 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
 vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1145,6 +1287,7 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1153,6 +1296,7 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
 vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1161,6 +1305,7 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1169,6 +1314,7 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
 vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1177,6 +1323,7 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1185,6 +1332,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
 vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1193,6 +1341,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1201,6 +1350,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
 vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1209,6 +1359,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1217,6 +1368,7 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
 vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1225,6 +1377,7 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1233,6 +1386,7 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
 vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1241,6 +1395,7 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1249,6 +1404,7 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
 vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1257,6 +1413,7 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1265,6 +1422,7 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
 vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1273,6 +1431,7 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1281,6 +1440,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
 vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1289,6 +1449,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1297,6 +1458,7 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
 vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1305,6 +1467,7 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1313,6 +1476,7 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
 vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1321,6 +1485,7 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1329,6 +1494,7 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
 vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1337,6 +1503,7 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1345,6 +1512,7 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
 vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1353,6 +1521,7 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1361,6 +1530,7 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
 vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1369,6 +1539,7 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1377,6 +1548,7 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
 vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1385,6 +1557,7 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1393,6 +1566,7 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
 vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1401,6 +1575,7 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
@@ -1409,6 +1584,7 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
 vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
index 49ceb9aabb224..87420436ee29d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vui
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint3
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint3
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint3
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vui
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint3
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint3
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint3
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint3
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuin
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint3
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint3
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint3
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m
   return vamoaddei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint6
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint6
   return vamoaddei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint6
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint6
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint6
   return vamoaddei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint6
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint6
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint6
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
   return vamoaddei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoaddei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2
   return vamoaddei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4
   return vamoaddei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m
   return vamoaddei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
index 0a71bf88b5c42..281c13c3567a2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vui
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint3
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint3
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint3
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vui
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint3
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint3
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint3
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint3
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuin
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint3
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint3
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint3
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m
   return vamoandei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint6
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint6
   return vamoandei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint6
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint6
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint6
   return vamoandei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint6
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint6
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint6
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
   return vamoandei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoandei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2
   return vamoandei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4
   return vamoandei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m
   return vamoandei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

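A minimal usage sketch of the overloaded vamoand form these tests exercise, assuming the Zvamo-era overloaded intrinsic API declared in riscv_vector.h; it mirrors the test bodies shown above rather than adding anything new:

// Sketch only, assuming the overloaded Zvamo intrinsics from riscv_vector.h.
#include <riscv_vector.h>

// Indexed atomic AND: for each of the first vl elements, base[bindex[i]] is
// atomically ANDed with value[i]; the previous memory values are returned.
vint32m1_t amoand_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
                        size_t vl) {
  return vamoandei16(base, bindex, value, vl);
}
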
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
index 4863c11b57b67..07f2894e814c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamomaxei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamomaxei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamomaxei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamomaxei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuin
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint3
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint3
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vu
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuin
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vu
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vui
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint6
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint6
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint6
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64
   return vamomaxuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuin
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuin
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint
   return vamomaxuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuin
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint
   return vamomaxuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint
   return vamomaxuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamomaxei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamomaxei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamomaxei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamomaxei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_
   return vamomaxuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m
   return vamomaxuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m
   return vamomaxuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64
   return vamomaxuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
index eba57e50d8b99..352e81d97642b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamominei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamominei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamominei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamominei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuin
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint3
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint3
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vu
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuin
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vu
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vui
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint6
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint6
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint6
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64
   return vamominuei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuin
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuin
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint
   return vamominuei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuin
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint
   return vamominuei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint
   return vamominuei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamominei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamominei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamominei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamominei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_
   return vamominuei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m
   return vamominuei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m
   return vamominuei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64
   return vamominuei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

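For readers skimming the checks above, a minimal sketch of the masked overloaded AMO-min intrinsic these tests exercise (illustrative only, not part of the patch; the helper name is hypothetical, and it assumes a toolchain exposing the experimental Zvamo intrinsics through <riscv_vector.h>):

#include <riscv_vector.h>

// For each lane enabled by the mask, atomically applies a signed minimum
// between the memory element addressed via base/bindex[i] and value[i],
// and returns the prior memory contents, mirroring the test bodies above.
vint32m1_t amomin_masked_example(vbool32_t mask, int32_t *base,
                                 vuint32m1_t bindex, vint32m1_t value,
                                 size_t vl) {
  return vamominei32(mask, base, bindex, value, vl);
}

The unmasked forms checked earlier in the file drop the leading mask argument but are otherwise called the same way.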
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
index 56cc72e004d46..43de42cbceb4f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32m
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint3
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuin
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint3
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuin
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8
   return vamoorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint6
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint6
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64
   return vamoorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint6
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64
   return vamoorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64
   return vamoorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bi
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bi
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bi
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_
   return vamoorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_
   return vamoorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4
   return vamoorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
index 9a7d81dc32761..d7646cd4c24dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -17,7 +16,6 @@ vint32mf2_t test_vamoswapei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -29,7 +27,6 @@ vint32m1_t test_vamoswapei8_v_i32m1(int32_t *base, vuint8mf4_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -41,7 +38,6 @@ vint32m2_t test_vamoswapei8_v_i32m2(int32_t *base, vuint8mf2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -53,7 +49,6 @@ vint32m4_t test_vamoswapei8_v_i32m4(int32_t *base, vuint8m1_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -65,7 +60,6 @@ vint32m8_t test_vamoswapei8_v_i32m8(int32_t *base, vuint8m2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -77,7 +71,6 @@ vint32mf2_t test_vamoswapei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -89,7 +82,6 @@ vint32m1_t test_vamoswapei16_v_i32m1(int32_t *base, vuint16mf2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -101,7 +93,6 @@ vint32m2_t test_vamoswapei16_v_i32m2(int32_t *base, vuint16m1_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -113,7 +104,6 @@ vint32m4_t test_vamoswapei16_v_i32m4(int32_t *base, vuint16m2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -125,7 +115,6 @@ vint32m8_t test_vamoswapei16_v_i32m8(int32_t *base, vuint16m4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -137,7 +126,6 @@ vint32mf2_t test_vamoswapei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -149,7 +137,6 @@ vint32m1_t test_vamoswapei32_v_i32m1(int32_t *base, vuint32m1_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -161,7 +148,6 @@ vint32m2_t test_vamoswapei32_v_i32m2(int32_t *base, vuint32m2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -173,7 +159,6 @@ vint32m4_t test_vamoswapei32_v_i32m4(int32_t *base, vuint32m4_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -185,7 +170,6 @@ vint32m8_t test_vamoswapei32_v_i32m8(int32_t *base, vuint32m8_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -197,7 +181,6 @@ vint32mf2_t test_vamoswapei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -209,7 +192,6 @@ vint32m1_t test_vamoswapei64_v_i32m1(int32_t *base, vuint64m2_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -221,7 +203,6 @@ vint32m2_t test_vamoswapei64_v_i32m2(int32_t *base, vuint64m4_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -233,7 +214,6 @@ vint32m4_t test_vamoswapei64_v_i32m4(int32_t *base, vuint64m8_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -245,7 +225,6 @@ vint64m1_t test_vamoswapei8_v_i64m1(int64_t *base, vuint8mf8_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -257,7 +236,6 @@ vint64m2_t test_vamoswapei8_v_i64m2(int64_t *base, vuint8mf4_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -269,7 +247,6 @@ vint64m4_t test_vamoswapei8_v_i64m4(int64_t *base, vuint8mf2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -281,7 +258,6 @@ vint64m8_t test_vamoswapei8_v_i64m8(int64_t *base, vuint8m1_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -293,7 +269,6 @@ vint64m1_t test_vamoswapei16_v_i64m1(int64_t *base, vuint16mf4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -305,7 +280,6 @@ vint64m2_t test_vamoswapei16_v_i64m2(int64_t *base, vuint16mf2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -317,7 +291,6 @@ vint64m4_t test_vamoswapei16_v_i64m4(int64_t *base, vuint16m1_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -329,7 +302,6 @@ vint64m8_t test_vamoswapei16_v_i64m8(int64_t *base, vuint16m2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -341,7 +313,6 @@ vint64m1_t test_vamoswapei32_v_i64m1(int64_t *base, vuint32mf2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -353,7 +324,6 @@ vint64m2_t test_vamoswapei32_v_i64m2(int64_t *base, vuint32m1_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -365,7 +335,6 @@ vint64m4_t test_vamoswapei32_v_i64m4(int64_t *base, vuint32m2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -377,7 +346,6 @@ vint64m8_t test_vamoswapei32_v_i64m8(int64_t *base, vuint32m4_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -389,7 +357,6 @@ vint64m1_t test_vamoswapei64_v_i64m1(int64_t *base, vuint64m1_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -401,7 +368,6 @@ vint64m2_t test_vamoswapei64_v_i64m2(int64_t *base, vuint64m2_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -413,7 +379,6 @@ vint64m4_t test_vamoswapei64_v_i64m4(int64_t *base, vuint64m4_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -425,7 +390,6 @@ vint64m8_t test_vamoswapei64_v_i64m8(int64_t *base, vuint64m8_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -437,7 +401,6 @@ vuint32mf2_t test_vamoswapei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -449,7 +412,6 @@ vuint32m1_t test_vamoswapei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -461,7 +423,6 @@ vuint32m2_t test_vamoswapei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -473,7 +434,6 @@ vuint32m4_t test_vamoswapei8_v_u32m4(uint32_t *base, vuint8m1_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -485,7 +445,6 @@ vuint32m8_t test_vamoswapei8_v_u32m8(uint32_t *base, vuint8m2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -497,7 +456,6 @@ vuint32mf2_t test_vamoswapei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -509,7 +467,6 @@ vuint32m1_t test_vamoswapei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -521,7 +478,6 @@ vuint32m2_t test_vamoswapei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -533,7 +489,6 @@ vuint32m4_t test_vamoswapei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -545,7 +500,6 @@ vuint32m8_t test_vamoswapei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -557,7 +511,6 @@ vuint32mf2_t test_vamoswapei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -569,7 +522,6 @@ vuint32m1_t test_vamoswapei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -581,7 +533,6 @@ vuint32m2_t test_vamoswapei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -593,7 +544,6 @@ vuint32m4_t test_vamoswapei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -605,7 +555,6 @@ vuint32m8_t test_vamoswapei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -617,7 +566,6 @@ vuint32mf2_t test_vamoswapei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -629,7 +577,6 @@ vuint32m1_t test_vamoswapei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -641,7 +588,6 @@ vuint32m2_t test_vamoswapei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -653,7 +599,6 @@ vuint32m4_t test_vamoswapei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -665,7 +610,6 @@ vuint64m1_t test_vamoswapei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -677,7 +621,6 @@ vuint64m2_t test_vamoswapei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -689,7 +632,6 @@ vuint64m4_t test_vamoswapei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -701,7 +643,6 @@ vuint64m8_t test_vamoswapei8_v_u64m8(uint64_t *base, vuint8m1_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -713,7 +654,6 @@ vuint64m1_t test_vamoswapei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -725,7 +665,6 @@ vuint64m2_t test_vamoswapei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -737,7 +676,6 @@ vuint64m4_t test_vamoswapei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -749,7 +687,6 @@ vuint64m8_t test_vamoswapei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -761,7 +698,6 @@ vuint64m1_t test_vamoswapei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -773,7 +709,6 @@ vuint64m2_t test_vamoswapei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -785,7 +720,6 @@ vuint64m4_t test_vamoswapei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -797,7 +731,6 @@ vuint64m8_t test_vamoswapei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -809,7 +742,6 @@ vuint64m1_t test_vamoswapei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -821,7 +753,6 @@ vuint64m2_t test_vamoswapei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -833,7 +764,6 @@ vuint64m4_t test_vamoswapei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -845,7 +775,6 @@ vuint64m8_t test_vamoswapei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -857,7 +786,6 @@ vfloat32mf2_t test_vamoswapei8_v_f32mf2(float *base, vuint8mf8_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -869,7 +797,6 @@ vfloat32m1_t test_vamoswapei8_v_f32m1(float *base, vuint8mf4_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -881,7 +808,6 @@ vfloat32m2_t test_vamoswapei8_v_f32m2(float *base, vuint8mf2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -893,7 +819,6 @@ vfloat32m4_t test_vamoswapei8_v_f32m4(float *base, vuint8m1_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -905,7 +830,6 @@ vfloat32m8_t test_vamoswapei8_v_f32m8(float *base, vuint8m2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -917,7 +841,6 @@ vfloat32mf2_t test_vamoswapei16_v_f32mf2(float *base, vuint16mf4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -929,7 +852,6 @@ vfloat32m1_t test_vamoswapei16_v_f32m1(float *base, vuint16mf2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -941,7 +863,6 @@ vfloat32m2_t test_vamoswapei16_v_f32m2(float *base, vuint16m1_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -953,7 +874,6 @@ vfloat32m4_t test_vamoswapei16_v_f32m4(float *base, vuint16m2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -965,7 +885,6 @@ vfloat32m8_t test_vamoswapei16_v_f32m8(float *base, vuint16m4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -977,7 +896,6 @@ vfloat32mf2_t test_vamoswapei32_v_f32mf2(float *base, vuint32mf2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -989,7 +907,6 @@ vfloat32m1_t test_vamoswapei32_v_f32m1(float *base, vuint32m1_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1001,7 +918,6 @@ vfloat32m2_t test_vamoswapei32_v_f32m2(float *base, vuint32m2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1013,7 +929,6 @@ vfloat32m4_t test_vamoswapei32_v_f32m4(float *base, vuint32m4_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1025,7 +940,6 @@ vfloat32m8_t test_vamoswapei32_v_f32m8(float *base, vuint32m8_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1037,7 +951,6 @@ vfloat32mf2_t test_vamoswapei64_v_f32mf2(float *base, vuint64m1_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1049,7 +962,6 @@ vfloat32m1_t test_vamoswapei64_v_f32m1(float *base, vuint64m2_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1061,7 +973,6 @@ vfloat32m2_t test_vamoswapei64_v_f32m2(float *base, vuint64m4_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1073,7 +984,6 @@ vfloat32m4_t test_vamoswapei64_v_f32m4(float *base, vuint64m8_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1085,7 +995,6 @@ vfloat64m1_t test_vamoswapei8_v_f64m1(double *base, vuint8mf8_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1097,7 +1006,6 @@ vfloat64m2_t test_vamoswapei8_v_f64m2(double *base, vuint8mf4_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1109,7 +1017,6 @@ vfloat64m4_t test_vamoswapei8_v_f64m4(double *base, vuint8mf2_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1121,7 +1028,6 @@ vfloat64m8_t test_vamoswapei8_v_f64m8(double *base, vuint8m1_t bindex,
   return vamoswapei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1133,7 +1039,6 @@ vfloat64m1_t test_vamoswapei16_v_f64m1(double *base, vuint16mf4_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1145,7 +1050,6 @@ vfloat64m2_t test_vamoswapei16_v_f64m2(double *base, vuint16mf2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1157,7 +1061,6 @@ vfloat64m4_t test_vamoswapei16_v_f64m4(double *base, vuint16m1_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1169,7 +1072,6 @@ vfloat64m8_t test_vamoswapei16_v_f64m8(double *base, vuint16m2_t bindex,
   return vamoswapei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1181,7 +1083,6 @@ vfloat64m1_t test_vamoswapei32_v_f64m1(double *base, vuint32mf2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1193,7 +1094,6 @@ vfloat64m2_t test_vamoswapei32_v_f64m2(double *base, vuint32m1_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1205,7 +1105,6 @@ vfloat64m4_t test_vamoswapei32_v_f64m4(double *base, vuint32m2_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1217,7 +1116,6 @@ vfloat64m8_t test_vamoswapei32_v_f64m8(double *base, vuint32m4_t bindex,
   return vamoswapei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1229,7 +1127,6 @@ vfloat64m1_t test_vamoswapei64_v_f64m1(double *base, vuint64m1_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1241,7 +1138,6 @@ vfloat64m2_t test_vamoswapei64_v_f64m2(double *base, vuint64m2_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1253,7 +1149,6 @@ vfloat64m4_t test_vamoswapei64_v_f64m4(double *base, vuint64m4_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1265,7 +1160,6 @@ vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex,
   return vamoswapei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1278,7 +1172,6 @@ vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1184,6 @@ vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1304,7 +1196,6 @@ vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1317,7 +1208,6 @@ vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1330,7 +1220,6 @@ vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1343,7 +1232,6 @@ vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1356,7 +1244,6 @@ vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1369,7 +1256,6 @@ vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1382,7 +1268,6 @@ vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1395,7 +1280,6 @@ vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1408,7 +1292,6 @@ vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1421,7 +1304,6 @@ vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1434,7 +1316,6 @@ vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1447,7 +1328,6 @@ vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1460,7 +1340,6 @@ vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1473,7 +1352,6 @@ vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1486,7 +1364,6 @@ vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1499,7 +1376,6 @@ vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1512,7 +1388,6 @@ vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1525,7 +1400,6 @@ vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1538,7 +1412,6 @@ vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1551,7 +1424,6 @@ vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1564,7 +1436,6 @@ vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1577,7 +1448,6 @@ vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1590,7 +1460,6 @@ vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1603,7 +1472,6 @@ vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1616,7 +1484,6 @@ vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1629,7 +1496,6 @@ vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1642,7 +1508,6 @@ vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1655,7 +1520,6 @@ vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1668,7 +1532,6 @@ vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1681,7 +1544,6 @@ vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1694,7 +1556,6 @@ vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1707,7 +1568,6 @@ vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1720,7 +1580,6 @@ vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1733,7 +1592,6 @@ vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1746,7 +1604,6 @@ vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1759,7 +1616,6 @@ vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1772,7 +1628,6 @@ vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1785,7 +1640,6 @@ vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1798,7 +1652,6 @@ vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1811,7 +1664,6 @@ vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1824,7 +1676,6 @@ vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1837,7 +1688,6 @@ vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1850,7 +1700,6 @@ vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1863,7 +1712,6 @@ vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1876,7 +1724,6 @@ vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1889,7 +1736,6 @@ vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1902,7 +1748,6 @@ vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1915,7 +1760,6 @@ vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1928,7 +1772,6 @@ vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1941,7 +1784,6 @@ vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1954,7 +1796,6 @@ vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1967,7 +1808,6 @@ vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1980,7 +1820,6 @@ vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1993,7 +1832,6 @@ vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2006,7 +1844,6 @@ vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2019,7 +1856,6 @@ vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2032,7 +1868,6 @@ vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2045,7 +1880,6 @@ vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2058,7 +1892,6 @@ vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2071,7 +1904,6 @@ vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2084,7 +1916,6 @@ vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2097,7 +1928,6 @@ vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2110,7 +1940,6 @@ vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2123,7 +1952,6 @@ vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -2136,7 +1964,6 @@ vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -2149,7 +1976,6 @@ vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -2162,7 +1988,6 @@ vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -2175,7 +2000,6 @@ vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2188,7 +2012,6 @@ vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2201,7 +2024,6 @@ vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2214,7 +2036,6 @@ vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2227,7 +2048,6 @@ vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2240,7 +2060,6 @@ vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2253,7 +2072,6 @@ vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2266,7 +2084,6 @@ vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2279,7 +2096,6 @@ vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2292,7 +2108,6 @@ vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2305,7 +2120,6 @@ vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2318,7 +2132,6 @@ vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2331,7 +2144,6 @@ vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2344,7 +2156,6 @@ vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2357,7 +2168,6 @@ vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -2370,7 +2180,6 @@ vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -2383,7 +2192,6 @@ vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -2396,7 +2204,6 @@ vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -2409,7 +2216,6 @@ vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -2422,7 +2228,6 @@ vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2435,7 +2240,6 @@ vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2448,7 +2252,6 @@ vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2461,7 +2264,6 @@ vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2474,7 +2276,6 @@ vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base,
   return vamoswapei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2487,7 +2288,6 @@ vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2500,7 +2300,6 @@ vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2513,7 +2312,6 @@ vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2526,7 +2324,6 @@ vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base,
   return vamoswapei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2539,7 +2336,6 @@ vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2552,7 +2348,6 @@ vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2565,7 +2360,6 @@ vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2578,7 +2372,6 @@ vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base,
   return vamoswapei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2591,7 +2384,6 @@ vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2604,7 +2396,6 @@ vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2617,7 +2408,6 @@ vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base,
   return vamoswapei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
index 26e405e30dee0..9e162a2de5c22 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -15,7 +14,6 @@ vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32m
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -26,7 +24,6 @@ vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -37,7 +34,6 @@ vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -48,7 +44,6 @@ vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -59,7 +54,6 @@ vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -70,7 +64,6 @@ vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint3
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -81,7 +74,6 @@ vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -92,7 +84,6 @@ vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -103,7 +94,6 @@ vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -114,7 +104,6 @@ vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -125,7 +114,6 @@ vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint3
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -136,7 +124,6 @@ vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -147,7 +134,6 @@ vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -158,7 +144,6 @@ vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -169,7 +154,6 @@ vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -180,7 +164,6 @@ vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -191,7 +174,6 @@ vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -202,7 +184,6 @@ vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -213,7 +194,6 @@ vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -224,7 +204,6 @@ vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -235,7 +214,6 @@ vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -246,7 +224,6 @@ vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -257,7 +234,6 @@ vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -268,7 +244,6 @@ vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -279,7 +254,6 @@ vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -290,7 +264,6 @@ vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -301,7 +274,6 @@ vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -312,7 +284,6 @@ vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -323,7 +294,6 @@ vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -334,7 +304,6 @@ vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -345,7 +314,6 @@ vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -356,7 +324,6 @@ vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -367,7 +334,6 @@ vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -378,7 +344,6 @@ vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -389,7 +354,6 @@ vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -400,7 +364,6 @@ vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -411,7 +374,6 @@ vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -422,7 +384,6 @@ vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -433,7 +394,6 @@ vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -444,7 +404,6 @@ vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -455,7 +414,6 @@ vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vui
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -466,7 +424,6 @@ vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -477,7 +434,6 @@ vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint3
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -488,7 +444,6 @@ vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint3
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -499,7 +454,6 @@ vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint3
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -510,7 +464,6 @@ vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vui
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -521,7 +474,6 @@ vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint3
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -532,7 +484,6 @@ vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint3
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -543,7 +494,6 @@ vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint3
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -554,7 +504,6 @@ vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint3
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -565,7 +514,6 @@ vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuin
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -576,7 +524,6 @@ vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint3
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -587,7 +534,6 @@ vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint3
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -598,7 +544,6 @@ vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint3
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -609,7 +554,6 @@ vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -620,7 +564,6 @@ vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -631,7 +574,6 @@ vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -642,7 +584,6 @@ vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m
   return vamoxorei8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -653,7 +594,6 @@ vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -664,7 +604,6 @@ vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -675,7 +614,6 @@ vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint6
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -686,7 +624,6 @@ vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint6
   return vamoxorei16(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -697,7 +634,6 @@ vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -708,7 +644,6 @@ vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint6
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -719,7 +654,6 @@ vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint6
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -730,7 +664,6 @@ vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint6
   return vamoxorei32(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -741,7 +674,6 @@ vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint6
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -752,7 +684,6 @@ vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint6
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -763,7 +694,6 @@ vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint6
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -774,7 +704,6 @@ vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
   return vamoxorei64(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -785,7 +714,6 @@ vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -796,7 +724,6 @@ vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -807,7 +734,6 @@ vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -818,7 +744,6 @@ vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -829,7 +754,6 @@ vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -840,7 +764,6 @@ vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -851,7 +774,6 @@ vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -862,7 +784,6 @@ vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -873,7 +794,6 @@ vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -884,7 +804,6 @@ vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -895,7 +814,6 @@ vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -906,7 +824,6 @@ vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -917,7 +834,6 @@ vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -928,7 +844,6 @@ vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -939,7 +854,6 @@ vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -950,7 +864,6 @@ vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -961,7 +874,6 @@ vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -972,7 +884,6 @@ vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -983,7 +894,6 @@ vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -994,7 +904,6 @@ vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1005,7 +914,6 @@ vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1016,7 +924,6 @@ vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1027,7 +934,6 @@ vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1038,7 +944,6 @@ vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1049,7 +954,6 @@ vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1060,7 +964,6 @@ vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1071,7 +974,6 @@ vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1082,7 +984,6 @@ vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1093,7 +994,6 @@ vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1104,7 +1004,6 @@ vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1115,7 +1014,6 @@ vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1126,7 +1024,6 @@ vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1137,7 +1034,6 @@ vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1148,7 +1044,6 @@ vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1159,7 +1054,6 @@ vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1170,7 +1064,6 @@ vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1181,7 +1074,6 @@ vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1192,7 +1084,6 @@ vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1203,7 +1094,6 @@ vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1214,7 +1104,6 @@ vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1225,7 +1114,6 @@ vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1236,7 +1124,6 @@ vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1247,7 +1134,6 @@ vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1258,7 +1144,6 @@ vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1269,7 +1154,6 @@ vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1280,7 +1164,6 @@ vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1291,7 +1174,6 @@ vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1302,7 +1184,6 @@ vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1313,7 +1194,6 @@ vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1324,7 +1204,6 @@ vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1335,7 +1214,6 @@ vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1346,7 +1224,6 @@ vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1357,7 +1234,6 @@ vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1368,7 +1244,6 @@ vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1379,7 +1254,6 @@ vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1390,7 +1264,6 @@ vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1401,7 +1274,6 @@ vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1412,7 +1284,6 @@ vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t
   return vamoxorei8(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1423,7 +1294,6 @@ vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1434,7 +1304,6 @@ vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1445,7 +1314,6 @@ vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1456,7 +1324,6 @@ vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2
   return vamoxorei16(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1467,7 +1334,6 @@ vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1478,7 +1344,6 @@ vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1489,7 +1354,6 @@ vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1500,7 +1364,6 @@ vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4
   return vamoxorei32(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1511,7 +1374,6 @@ vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1522,7 +1384,6 @@ vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1533,7 +1394,6 @@ vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m
   return vamoxorei64(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
index f46d022ef3c07..b88e759c21e13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
index e5acb28e3ad99..1a142979ce6c0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vasub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vasubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
index 5968a748ced5a..d73d173d3356e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vdivu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
index d0d918cb19b12..4ced9b358676e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
   return vfabs(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index 1f6ed0d783c2d..942e8bd6f1d39 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -10,6 +10,7 @@
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
 vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -18,6 +19,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
 vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -26,6 +28,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
 vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -34,6 +37,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
 vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -42,6 +46,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
 vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -50,6 +55,7 @@ vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
 vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -58,6 +64,7 @@ vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
 vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -66,6 +73,7 @@ vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
 vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -74,6 +82,7 @@ vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
 vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -82,6 +91,7 @@ vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
 vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -90,6 +100,7 @@ vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
 vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -98,6 +109,7 @@ vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
 vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -106,6 +118,7 @@ vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
 vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -114,6 +127,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
 vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -122,6 +136,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
 vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -130,6 +145,7 @@ vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
 vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -138,6 +154,7 @@ vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
 vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -146,6 +163,7 @@ vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
 vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -154,6 +172,7 @@ vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
 vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -162,6 +181,7 @@ vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
 vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -170,6 +190,7 @@ vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
 vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -178,6 +199,7 @@ vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
 vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -186,6 +208,7 @@ vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
 vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -194,6 +217,7 @@ vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
 vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -202,6 +226,7 @@ vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
 vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -210,6 +235,7 @@ vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
 vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -218,6 +244,7 @@ vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
 vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -226,6 +253,7 @@ vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
 vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }
@@ -234,6 +262,7 @@ vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
 vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vfadd(op1, op2, vl);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
index 131182f09cfc4..7df3f86f4b0d5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
   return vfclass(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vfclass(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
index 20534fe064260..0702c205967aa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
   return vfcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -525,7 +473,6 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -535,7 +482,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
index 3379924714604..54b1a35a35c73 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
index 70d2264238821..4fbd882f805a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 long test_vfirst_m_b16(vbool16_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ long test_vfirst_m_b16(vbool16_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 long test_vfirst_m_b32(vbool32_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -60,7 +53,6 @@ long test_vfirst_m_b32(vbool32_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 long test_vfirst_m_b64(vbool64_t op1, size_t vl) { return vfirst(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -70,7 +62,6 @@ long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
   return vfirst(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -80,7 +71,6 @@ long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
   return vfirst(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -90,7 +80,6 @@ long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
   return vfirst(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -100,7 +89,6 @@ long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
   return vfirst(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -110,7 +98,6 @@ long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
   return vfirst(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -120,7 +107,6 @@ long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
   return vfirst(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
index 54174fa05a12d..b7b5ab2b1f825 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
index fda3ee2a52a4c..4fe2854f28e14 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
index 801c77409f1fb..36d7501d36c6b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
index 09bce7ccbb9b6..a4284be12b05c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
   return vfmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
index c8e3534af272a..4668c7b8e8ccf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
index 604da9b416f97..60d839b982241 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
index 913fe656a89bb..814989fea75dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +206,6 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -238,7 +217,6 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +227,6 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -261,7 +238,6 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +248,6 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -284,7 +259,6 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +269,6 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -307,7 +280,6 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +290,6 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,7 +301,6 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +311,6 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -353,7 +322,6 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +332,6 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -376,7 +343,6 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +353,6 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -399,7 +364,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
index 89f29856cfbb4..8890796c584d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
index bff016fb2f8e1..5b8935e1e230d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> [[SRC:%.*]])
@@ -13,7 +12,6 @@
 //
 float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -23,7 +21,6 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> [[SRC:%.*]])
@@ -31,7 +28,6 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) {
 //
 float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -41,7 +37,6 @@ vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> [[SRC:%.*]])
@@ -49,7 +44,6 @@ vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) {
 //
 float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -59,7 +53,6 @@ vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> [[SRC:%.*]])
@@ -67,7 +60,6 @@ vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) {
 //
 float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -77,7 +69,6 @@ vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> [[SRC:%.*]])
@@ -85,7 +76,6 @@ vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) {
 //
 float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +85,6 @@ vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> [[SRC:%.*]])
@@ -103,7 +92,6 @@ vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) {
 //
 double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -113,7 +101,6 @@ vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> [[SRC:%.*]])
@@ -121,7 +108,6 @@ vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) {
 //
 double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -131,7 +117,6 @@ vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> [[SRC:%.*]])
@@ -139,7 +124,6 @@ vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) {
 //
 double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
@@ -149,7 +133,6 @@ vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) {
   return vfmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> [[SRC:%.*]])
@@ -157,7 +140,6 @@ vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) {
 //
 double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { return vfmv_f(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
index a85aded767a33..0e1a5c2bc336c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -445,7 +401,6 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -455,7 +410,6 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -465,7 +419,6 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
   return vfncvt_rod_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -475,7 +428,6 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -485,7 +437,6 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
   return vfncvt_rod_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -495,7 +446,6 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -505,7 +455,6 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_rod_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -515,7 +464,6 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
index 022e18e86c978..73e28708542c2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
   return vfneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
index cebd237c413e9..cf04131ef16ba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
index 56feb0882335f..28bd617ba1567 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
index 3bcdfdad8245e..6ccdd00b41837 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
index ba316ca031e27..6f64cbaeb9f7c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
   return vfnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +196,6 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -227,7 +207,6 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -239,7 +218,6 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -250,7 +228,6 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -262,7 +239,6 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -273,7 +249,6 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +260,6 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -296,7 +270,6 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -308,7 +281,6 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -319,7 +291,6 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -331,7 +302,6 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -342,7 +312,6 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -354,7 +323,6 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -365,7 +333,6 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -377,7 +344,6 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -388,7 +354,6 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -400,7 +365,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
index 406b8e3802b1a..e8b1c122d8c07 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfrdiv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
index 66fb428472245..9fb6d54e155b6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
   return vfrec7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
index 0d74456ba5caa..cf09fa1e44492 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
   return vfredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
index b3467db93431e..bb66a84074138 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
   return vfredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
index 8c82c7828092f..4555884d97511 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
   return vfredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -213,7 +195,6 @@ vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -225,7 +206,6 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -237,7 +217,6 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -249,7 +228,6 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -261,7 +239,6 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -273,7 +250,6 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -285,7 +261,6 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -297,7 +272,6 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -309,7 +283,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -321,7 +294,6 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
   return vfredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -333,7 +305,6 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +316,6 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -357,7 +327,6 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -369,7 +338,6 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -381,7 +349,6 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -393,7 +360,6 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -405,7 +371,6 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -417,7 +382,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
index f60e3fd6e0ad7..daf2c9b07cde6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
   return vfrsqrt7(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
index 76ae856786ec0..ef2389abe96ac 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
index bd531c42aec21..1a6babf318d86 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnj(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -205,7 +186,6 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -215,7 +195,6 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +214,6 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -247,7 +224,6 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -257,7 +233,6 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -268,7 +243,6 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -278,7 +252,6 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -289,7 +262,6 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -299,7 +271,6 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -310,7 +281,6 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -320,7 +290,6 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -331,7 +300,6 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -341,7 +309,6 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -352,7 +319,6 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -362,7 +328,6 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -373,7 +338,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -383,7 +347,6 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnjn(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +357,6 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +366,6 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -415,7 +376,6 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -425,7 +385,6 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -436,7 +395,6 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -446,7 +404,6 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -457,7 +414,6 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -467,7 +423,6 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -478,7 +433,6 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -488,7 +442,6 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -499,7 +452,6 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -509,7 +461,6 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -520,7 +471,6 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -530,7 +480,6 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -541,7 +490,6 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -551,7 +499,6 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -562,7 +509,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsgnjx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
index b1e76d5850ee8..02eb8c9fe4beb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
   return vfslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
index 1136e98946824..6b43cb6b3f95d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
   return vfslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
index bdb0b53cbfd6b..e49382c111eee 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
   return vfsqrt(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
index f8c75a7cb1789..22b4faf6485e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
   return vfsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
index 6c0c516ded283..6af5e51288d64 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfwadd_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
   return vfwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
   return vfwadd_wf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
   return vfwadd_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
   return vfwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
   return vfwadd_wf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
   return vfwadd_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
   return vfwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
   return vfwadd_wf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
   return vfwadd_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
   return vfwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
index 56e0d6c399454..e0579552765ab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_rtz_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_rtz_xu(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
index 6126db8cfe83d..aeb2a27df958a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
index d2e35dc6385fa..e550c07b0dec5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
index 5bcde0860382b..4089e276c66a4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
index f8a260a8d2f77..d714fb520a826 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwnmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwnmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
index a5e5cc72ac4d0..1f8160fec2c3f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
   return vfwnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
   return vfwnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
index be390c66fb531..14049e1ea5cf2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
   return vfwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -29,7 +27,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
   return vfwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -41,7 +38,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
   return vfwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -53,7 +49,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
   return vfwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -65,7 +60,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
   return vfwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -77,7 +71,6 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -89,7 +82,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -101,7 +93,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -113,7 +104,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +115,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
   return vfwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -137,7 +126,6 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
   return vfwredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -149,7 +137,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
   return vfwredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -161,7 +148,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
   return vfwredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -173,7 +159,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
   return vfwredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -185,7 +170,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
   return vfwredosum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -197,7 +181,6 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vfwredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -209,7 +192,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
   return vfwredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -221,7 +203,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
   return vfwredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -233,7 +214,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfwredosum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
index 1f0bbe88c630b..bea437dfd7e6a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vfwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
   return vfwsub_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
   return vfwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
   return vfwsub_wf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
   return vfwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
   return vfwsub_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
   return vfwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
   return vfwsub_wf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
   return vfwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
   return vfwsub_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
   return vfwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
   return vfwsub_wf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
   return vfwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
   return vfwsub_vf(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
   return vfwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
index 49b319e6aa276..061096c9f9a0b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -57,7 +52,6 @@ vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -67,7 +61,6 @@ vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -77,7 +70,6 @@ vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -99,7 +90,6 @@ vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -132,7 +120,6 @@ vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -143,7 +130,6 @@ vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -154,7 +140,6 @@ vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +150,6 @@ vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -176,7 +160,6 @@ vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -187,7 +170,6 @@ vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -198,7 +180,6 @@ vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -209,7 +190,6 @@ vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -220,7 +200,6 @@ vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -231,7 +210,6 @@ vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return vid(mask, maskedoff, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
index d90f8c08690cc..ec51c9169854f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
   return viota(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
index 2d9b6cb86bc70..c1fedfa99f442 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -16,7 +15,6 @@ vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -27,7 +25,6 @@ vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -38,7 +35,6 @@ vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -49,7 +45,6 @@ vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -60,7 +55,6 @@ vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -71,7 +65,6 @@ vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -82,7 +75,6 @@ vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -93,7 +85,6 @@ vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -104,7 +95,6 @@ vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -115,7 +105,6 @@ vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -126,7 +115,6 @@ vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -137,7 +125,6 @@ vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -148,7 +135,6 @@ vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -159,7 +145,6 @@ vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -170,7 +155,6 @@ vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -181,7 +165,6 @@ vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -192,7 +175,6 @@ vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -203,7 +185,6 @@ vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -214,7 +195,6 @@ vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -225,7 +205,6 @@ vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -236,7 +215,6 @@ vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -247,7 +225,6 @@ vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -258,7 +235,6 @@ vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -269,7 +245,6 @@ vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -280,7 +255,6 @@ vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -291,7 +265,6 @@ vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -302,7 +275,6 @@ vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -313,7 +285,6 @@ vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -324,7 +295,6 @@ vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -335,7 +305,6 @@ vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -346,7 +315,6 @@ vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -357,7 +325,6 @@ vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -368,7 +335,6 @@ vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -379,7 +345,6 @@ vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -390,7 +355,6 @@ vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -401,7 +365,6 @@ vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -412,7 +375,6 @@ vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -423,7 +385,6 @@ vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -434,7 +395,6 @@ vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -445,7 +405,6 @@ vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -456,7 +415,6 @@ vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -467,7 +425,6 @@ vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -478,7 +435,6 @@ vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -489,7 +445,6 @@ vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -500,7 +455,6 @@ vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -511,7 +465,6 @@ vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -522,7 +475,6 @@ vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -533,7 +485,6 @@ vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -544,7 +495,6 @@ vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -555,7 +505,6 @@ vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -566,7 +515,6 @@ vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -577,7 +525,6 @@ vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -588,7 +535,6 @@ vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -599,7 +545,6 @@ vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -610,7 +555,6 @@ vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -621,7 +565,6 @@ vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -632,7 +575,6 @@ vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -643,7 +585,6 @@ vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -654,7 +595,6 @@ vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -665,7 +605,6 @@ vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -676,7 +615,6 @@ vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -687,7 +625,6 @@ vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -698,7 +635,6 @@ vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -709,7 +645,6 @@ vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -720,7 +655,6 @@ vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -731,7 +665,6 @@ vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -742,7 +675,6 @@ vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -753,7 +685,6 @@ vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -764,7 +695,6 @@ vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -775,7 +705,6 @@ vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -786,7 +715,6 @@ vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -797,7 +725,6 @@ vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -808,7 +735,6 @@ vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -819,7 +745,6 @@ vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -830,7 +755,6 @@ vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -841,7 +765,6 @@ vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -852,7 +775,6 @@ vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -863,7 +785,6 @@ vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -874,7 +795,6 @@ vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -885,7 +805,6 @@ vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -896,7 +815,6 @@ vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -907,7 +825,6 @@ vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -918,7 +835,6 @@ vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -929,7 +845,6 @@ vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -940,7 +855,6 @@ vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -951,7 +865,6 @@ vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -962,7 +875,6 @@ vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -973,7 +885,6 @@ vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -984,7 +895,6 @@ vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -995,7 +905,6 @@ vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1006,7 +915,6 @@ vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1017,7 +925,6 @@ vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1028,7 +935,6 @@ vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1039,7 +945,6 @@ vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1050,7 +955,6 @@ vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1061,7 +965,6 @@ vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1072,7 +975,6 @@ vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1083,7 +985,6 @@ vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1094,7 +995,6 @@ vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1105,7 +1005,6 @@ vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1116,7 +1015,6 @@ vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1127,7 +1025,6 @@ vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1138,7 +1035,6 @@ vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1149,7 +1045,6 @@ vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1160,7 +1055,6 @@ vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1171,7 +1065,6 @@ vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1182,7 +1075,6 @@ vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1193,7 +1085,6 @@ vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1204,7 +1095,6 @@ vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1215,7 +1105,6 @@ vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1226,7 +1115,6 @@ vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1237,7 +1125,6 @@ vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1248,7 +1135,6 @@ vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1259,7 +1145,6 @@ vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1270,7 +1155,6 @@ vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1281,7 +1165,6 @@ vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1292,7 +1175,6 @@ vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1303,7 +1185,6 @@ vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1314,7 +1195,6 @@ vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1326,7 +1206,6 @@ vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1337,7 +1216,6 @@ vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1348,7 +1226,6 @@ vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1359,7 +1236,6 @@ vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1370,7 +1246,6 @@ vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1381,7 +1256,6 @@ vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1392,7 +1266,6 @@ vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1403,7 +1276,6 @@ vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1414,7 +1286,6 @@ vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1425,7 +1296,6 @@ vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1436,7 +1306,6 @@ vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1447,7 +1316,6 @@ vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1458,7 +1326,6 @@ vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1469,7 +1336,6 @@ vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1480,7 +1346,6 @@ vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1491,7 +1356,6 @@ vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1502,7 +1366,6 @@ vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1513,7 +1376,6 @@ vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1524,7 +1386,6 @@ vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1535,7 +1396,6 @@ vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1546,7 +1406,6 @@ vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1557,7 +1416,6 @@ vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1568,7 +1426,6 @@ vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1579,7 +1436,6 @@ vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1590,7 +1446,6 @@ vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1601,7 +1456,6 @@ vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1612,7 +1466,6 @@ vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1623,7 +1476,6 @@ vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1634,7 +1486,6 @@ vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1645,7 +1496,6 @@ vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1656,7 +1506,6 @@ vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1667,7 +1516,6 @@ vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1678,7 +1526,6 @@ vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1689,7 +1536,6 @@ vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1700,7 +1546,6 @@ vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1711,7 +1556,6 @@ vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1722,7 +1566,6 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1733,7 +1576,6 @@ vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1744,7 +1586,6 @@ vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1755,7 +1596,6 @@ vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1766,7 +1606,6 @@ vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1777,7 +1616,6 @@ vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1788,7 +1626,6 @@ vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1799,7 +1636,6 @@ vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1810,7 +1646,6 @@ vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1821,7 +1656,6 @@ vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1832,7 +1666,6 @@ vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1843,7 +1676,6 @@ vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1854,7 +1686,6 @@ vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1865,7 +1696,6 @@ vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1876,7 +1706,6 @@ vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1887,7 +1716,6 @@ vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1898,7 +1726,6 @@ vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1909,7 +1736,6 @@ vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1920,7 +1746,6 @@ vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1931,7 +1756,6 @@ vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1942,7 +1766,6 @@ vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1953,7 +1776,6 @@ vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1964,7 +1786,6 @@ vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1975,7 +1796,6 @@ vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
   return vloxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1986,7 +1806,6 @@ vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1997,7 +1816,6 @@ vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2008,7 +1826,6 @@ vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2019,7 +1836,6 @@ vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
   return vloxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2030,7 +1846,6 @@ vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2041,7 +1856,6 @@ vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2052,7 +1866,6 @@ vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2063,7 +1876,6 @@ vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
   return vloxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2074,7 +1886,6 @@ vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2085,7 +1896,6 @@ vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2096,7 +1906,6 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
   return vloxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
index 07c235ef8c5e6..efafbf32ff4c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -16,7 +15,6 @@ vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -27,7 +25,6 @@ vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -38,7 +35,6 @@ vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -49,7 +45,6 @@ vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl)
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -60,7 +55,6 @@ vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl)
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -71,7 +65,6 @@ vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl)
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -82,7 +75,6 @@ vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl)
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -93,7 +85,6 @@ vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -104,7 +95,6 @@ vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -115,7 +105,6 @@ vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -126,7 +115,6 @@ vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -137,7 +125,6 @@ vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -148,7 +135,6 @@ vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -159,7 +145,6 @@ vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -170,7 +155,6 @@ vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -181,7 +165,6 @@ vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -192,7 +175,6 @@ vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -203,7 +185,6 @@ vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -214,7 +195,6 @@ vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -225,7 +205,6 @@ vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -236,7 +215,6 @@ vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -247,7 +225,6 @@ vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -258,7 +235,6 @@ vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -269,7 +245,6 @@ vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -280,7 +255,6 @@ vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -291,7 +265,6 @@ vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -302,7 +275,6 @@ vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -313,7 +285,6 @@ vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -324,7 +295,6 @@ vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -335,7 +305,6 @@ vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -346,7 +315,6 @@ vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -357,7 +325,6 @@ vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -368,7 +335,6 @@ vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -379,7 +345,6 @@ vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -390,7 +355,6 @@ vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -401,7 +365,6 @@ vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -412,7 +375,6 @@ vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -423,7 +385,6 @@ vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -434,7 +395,6 @@ vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -445,7 +405,6 @@ vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -456,7 +415,6 @@ vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -467,7 +425,6 @@ vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -478,7 +435,6 @@ vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -489,7 +445,6 @@ vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -500,7 +455,6 @@ vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -511,7 +465,6 @@ vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -522,7 +475,6 @@ vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -533,7 +485,6 @@ vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -544,7 +495,6 @@ vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -555,7 +505,6 @@ vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -566,7 +515,6 @@ vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -577,7 +525,6 @@ vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -588,7 +535,6 @@ vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -599,7 +545,6 @@ vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -610,7 +555,6 @@ vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -621,7 +565,6 @@ vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -632,7 +575,6 @@ vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -643,7 +585,6 @@ vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -654,7 +595,6 @@ vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -665,7 +605,6 @@ vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -676,7 +615,6 @@ vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -687,7 +625,6 @@ vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -698,7 +635,6 @@ vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -709,7 +645,6 @@ vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -720,7 +655,6 @@ vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -731,7 +665,6 @@ vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -742,7 +675,6 @@ vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -753,7 +685,6 @@ vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -764,7 +695,6 @@ vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -775,7 +705,6 @@ vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -786,7 +715,6 @@ vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -797,7 +725,6 @@ vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -808,7 +735,6 @@ vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -819,7 +745,6 @@ vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -830,7 +755,6 @@ vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -841,7 +765,6 @@ vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -852,7 +775,6 @@ vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -863,7 +785,6 @@ vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -874,7 +795,6 @@ vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -885,7 +805,6 @@ vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -896,7 +815,6 @@ vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -907,7 +825,6 @@ vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -918,7 +835,6 @@ vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -929,7 +845,6 @@ vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -940,7 +855,6 @@ vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -951,7 +865,6 @@ vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -962,7 +875,6 @@ vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -973,7 +885,6 @@ vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -984,7 +895,6 @@ vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -995,7 +905,6 @@ vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -1006,7 +915,6 @@ vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1017,7 +925,6 @@ vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1028,7 +935,6 @@ vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1039,7 +945,6 @@ vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1050,7 +955,6 @@ vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -1061,7 +965,6 @@ vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -1072,7 +975,6 @@ vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -1083,7 +985,6 @@ vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -1094,7 +995,6 @@ vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -1105,7 +1005,6 @@ vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1116,7 +1015,6 @@ vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1127,7 +1025,6 @@ vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1138,7 +1035,6 @@ vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1149,7 +1045,6 @@ vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1160,7 +1055,6 @@ vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1171,7 +1065,6 @@ vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1182,7 +1075,6 @@ vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1193,7 +1085,6 @@ vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1204,7 +1095,6 @@ vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1215,7 +1105,6 @@ vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1226,7 +1115,6 @@ vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -1237,7 +1125,6 @@ vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1248,7 +1135,6 @@ vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1259,7 +1145,6 @@ vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1270,7 +1155,6 @@ vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1281,7 +1165,6 @@ vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -1292,7 +1175,6 @@ vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -1303,7 +1185,6 @@ vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -1314,7 +1195,6 @@ vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -1325,7 +1205,6 @@ vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -1336,7 +1215,6 @@ vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1347,7 +1225,6 @@ vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1358,7 +1235,6 @@ vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1369,7 +1245,6 @@ vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1380,7 +1255,6 @@ vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1391,7 +1265,6 @@ vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1402,7 +1275,6 @@ vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1413,7 +1285,6 @@ vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1424,7 +1295,6 @@ vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1435,7 +1305,6 @@ vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1446,7 +1315,6 @@ vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1457,7 +1325,6 @@ vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1468,7 +1335,6 @@ vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1479,7 +1345,6 @@ vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1490,7 +1355,6 @@ vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -1501,7 +1365,6 @@ vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -1512,7 +1375,6 @@ vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -1523,7 +1385,6 @@ vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -1534,7 +1395,6 @@ vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -1545,7 +1405,6 @@ vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1556,7 +1415,6 @@ vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1567,7 +1425,6 @@ vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1578,7 +1435,6 @@ vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1589,7 +1445,6 @@ vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1600,7 +1455,6 @@ vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1611,7 +1465,6 @@ vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1622,7 +1475,6 @@ vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1633,7 +1485,6 @@ vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1644,7 +1495,6 @@ vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1655,7 +1505,6 @@ vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1666,7 +1515,6 @@ vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1677,7 +1525,6 @@ vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1688,7 +1535,6 @@ vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1699,7 +1545,6 @@ vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1710,7 +1555,6 @@ vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1721,7 +1565,6 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1732,7 +1575,6 @@ vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1743,7 +1585,6 @@ vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1754,7 +1595,6 @@ vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1765,7 +1605,6 @@ vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1776,7 +1615,6 @@ vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1787,7 +1625,6 @@ vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1798,7 +1635,6 @@ vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1809,7 +1645,6 @@ vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1820,7 +1655,6 @@ vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1831,7 +1665,6 @@ vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1842,7 +1675,6 @@ vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1853,7 +1685,6 @@ vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1864,7 +1695,6 @@ vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1875,7 +1705,6 @@ vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -1886,7 +1715,6 @@ vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1897,7 +1725,6 @@ vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -1908,7 +1735,6 @@ vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -1919,7 +1745,6 @@ vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -1930,7 +1755,6 @@ vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1941,7 +1765,6 @@ vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1952,7 +1775,6 @@ vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1963,7 +1785,6 @@ vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1974,7 +1795,6 @@ vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t
   return vluxei8(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1985,7 +1805,6 @@ vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1996,7 +1815,6 @@ vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2007,7 +1825,6 @@ vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2018,7 +1835,6 @@ vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_
   return vluxei16(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2029,7 +1845,6 @@ vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2040,7 +1855,6 @@ vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2051,7 +1865,6 @@ vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -2062,7 +1875,6 @@ vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_
   return vluxei32(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -2073,7 +1885,6 @@ vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -2084,7 +1895,6 @@ vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -2095,7 +1905,6 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
index e902a63f976cf..7c6c217a8f6c9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
   return vmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c
index 7ee412a7515cc..1c24617f7d91b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -36,7 +33,6 @@ vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -46,7 +42,6 @@ vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -57,7 +52,6 @@ vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -99,7 +90,6 @@ vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -120,7 +109,6 @@ vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -130,7 +118,6 @@ vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -141,7 +128,6 @@ vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -162,7 +147,6 @@ vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +185,6 @@ vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -225,7 +204,6 @@ vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -236,7 +214,6 @@ vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +223,6 @@ vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +232,6 @@ vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -267,7 +242,6 @@ vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -278,7 +252,6 @@ vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -288,7 +261,6 @@ vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -298,7 +270,6 @@ vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -309,7 +280,6 @@ vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -320,7 +290,6 @@ vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -331,7 +300,6 @@ vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -341,7 +309,6 @@ vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -352,7 +319,6 @@ vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -363,7 +329,6 @@ vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +339,6 @@ vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +348,6 @@ vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -395,7 +358,6 @@ vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -406,7 +368,6 @@ vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -416,7 +377,6 @@ vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -426,7 +386,6 @@ vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -437,7 +396,6 @@ vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -448,7 +406,6 @@ vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +415,6 @@ vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +424,6 @@ vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -479,7 +434,6 @@ vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -490,7 +444,6 @@ vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +453,6 @@ vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +462,6 @@ vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +472,6 @@ vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +482,6 @@ vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -542,7 +491,6 @@ vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -552,7 +500,6 @@ vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -563,7 +510,6 @@ vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -574,7 +520,6 @@ vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -585,7 +530,6 @@ vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +539,6 @@ vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -606,7 +549,6 @@ vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -617,7 +559,6 @@ vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -627,7 +568,6 @@ vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -637,7 +577,6 @@ vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -648,7 +587,6 @@ vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -659,7 +597,6 @@ vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +606,6 @@ vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +615,6 @@ vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -690,7 +625,6 @@ vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -701,7 +635,6 @@ vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -711,7 +644,6 @@ vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -721,7 +653,6 @@ vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -732,7 +663,6 @@ vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -743,7 +673,6 @@ vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -753,7 +682,6 @@ vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -763,7 +691,6 @@ vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +701,6 @@ vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +711,6 @@ vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -795,7 +720,6 @@ vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -805,7 +729,6 @@ vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -816,7 +739,6 @@ vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -827,7 +749,6 @@ vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +758,6 @@ vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +767,6 @@ vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -858,7 +777,6 @@ vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -869,7 +787,6 @@ vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +796,6 @@ vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +805,6 @@ vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -900,7 +815,6 @@ vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -911,7 +825,6 @@ vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +834,6 @@ vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +843,6 @@ vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -942,7 +853,6 @@ vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -953,7 +863,6 @@ vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -963,7 +872,6 @@ vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -973,7 +881,6 @@ vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -984,7 +891,6 @@ vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -995,7 +901,6 @@ vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +910,6 @@ vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1015,7 +919,6 @@ vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1026,7 +929,6 @@ vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1037,7 +939,6 @@ vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1047,7 +948,6 @@ vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +957,6 @@ vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1068,7 +967,6 @@ vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +977,6 @@ vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1089,7 +986,6 @@ vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +995,6 @@ vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1005,6 @@ vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1015,6 @@ vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1024,6 @@ vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1033,6 @@ vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1043,6 @@ vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1163,7 +1053,6 @@ vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1173,7 +1062,6 @@ vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1183,7 +1071,6 @@ vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1081,6 @@ vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1205,7 +1091,6 @@ vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1215,7 +1100,6 @@ vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1109,6 @@ vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1119,6 @@ vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1129,6 @@ vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1258,7 +1139,6 @@ vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1268,7 +1148,6 @@ vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1279,7 +1158,6 @@ vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1290,7 +1168,6 @@ vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1301,7 +1178,6 @@ vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1187,6 @@ vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1197,6 @@ vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1207,6 @@ vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1343,7 +1216,6 @@ vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1225,6 @@ vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1235,6 @@ vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1245,6 @@ vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1385,7 +1254,6 @@ vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1395,7 +1263,6 @@ vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1406,7 +1273,6 @@ vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1417,7 +1283,6 @@ vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1427,7 +1292,6 @@ vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1437,7 +1301,6 @@ vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1448,7 +1311,6 @@ vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1459,7 +1321,6 @@ vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1469,7 +1330,6 @@ vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1339,6 @@ vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1349,6 @@ vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1359,6 @@ vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1369,6 @@ vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1522,7 +1378,6 @@ vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1533,7 +1388,6 @@ vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1398,6 @@ vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1407,6 @@ vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1416,6 @@ vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1575,7 +1426,6 @@ vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1586,7 +1436,6 @@ vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1596,7 +1445,6 @@ vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1454,6 @@ vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1617,7 +1464,6 @@ vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1628,7 +1474,6 @@ vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1659,7 +1502,6 @@ vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1670,7 +1512,6 @@ vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1680,7 +1521,6 @@ vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1690,7 +1530,6 @@ vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1701,7 +1540,6 @@ vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1712,7 +1550,6 @@ vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1722,7 +1559,6 @@ vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1732,7 +1568,6 @@ vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1743,7 +1578,6 @@ vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1588,6 @@ vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1597,6 @@ vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1606,6 @@ vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1616,6 @@ vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1626,6 @@ vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1806,7 +1635,6 @@ vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1816,7 +1644,6 @@ vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1827,7 +1654,6 @@ vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
@@ -1838,7 +1664,6 @@ vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
   return vmadc(op1, op2, carryin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1848,7 +1673,6 @@ vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmadc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
index 4bd51dbed83cf..80cb56780f5f8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vmadd(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
   return vmadd(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
index 47913d4c6b369..bebab3fcbea57 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmand_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
   return vmand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmandnot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmandnot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmandnot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmandnot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmandnot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmandnot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
index 66c0d79de5d03..388dbd9710b51 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmax(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
index ecbe041476fdd..6c2475dba4fd7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +195,6 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -225,7 +205,6 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -236,7 +215,6 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -247,7 +225,6 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -258,7 +235,6 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -269,7 +245,6 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -280,7 +255,6 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -291,7 +265,6 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -302,7 +275,6 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -313,7 +285,6 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -324,7 +295,6 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +305,6 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -357,7 +325,6 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -368,7 +335,6 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -379,7 +345,6 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -401,7 +365,6 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -423,7 +385,6 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -434,7 +395,6 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -445,7 +405,6 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -456,7 +415,6 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -467,7 +425,6 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -478,7 +435,6 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -489,7 +445,6 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -511,7 +465,6 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -522,7 +475,6 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -533,7 +485,6 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +495,6 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -555,7 +505,6 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -566,7 +515,6 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -577,7 +525,6 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -588,7 +535,6 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -599,7 +545,6 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -610,7 +555,6 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -621,7 +565,6 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +575,6 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -643,7 +585,6 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +595,6 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +605,6 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -676,7 +615,6 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -687,7 +625,6 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -698,7 +635,6 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -709,7 +645,6 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -720,7 +655,6 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -731,7 +665,6 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -742,7 +675,6 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -753,7 +685,6 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +695,6 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -775,7 +705,6 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -786,7 +715,6 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -797,7 +725,6 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -808,7 +735,6 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -819,7 +745,6 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -830,7 +755,6 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -841,7 +765,6 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -852,7 +775,6 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -863,7 +785,6 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -874,7 +795,6 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -885,7 +805,6 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +815,6 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -907,7 +825,6 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -918,7 +835,6 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -929,7 +845,6 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -940,7 +855,6 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -951,7 +865,6 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -962,7 +875,6 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -973,7 +885,6 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +895,6 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -995,7 +905,6 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1006,7 +915,6 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1017,7 +925,6 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1028,7 +935,6 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1039,7 +945,6 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1050,7 +955,6 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1061,7 +965,6 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
   return vmerge(mask, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
index d7f5b59dcc1ab..62d75c3d53b4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfeq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfeq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
index e4624ab9eb433..325f527dd6756 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmfge_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vbool64_t test_vmfge_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vbool32_t test_vmfge_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vbool32_t test_vmfge_vf_f32m1_b32 (vfloat32m1_t op1, float op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vbool16_t test_vmfge_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vbool16_t test_vmfge_vf_f32m2_b16 (vfloat32m2_t op1, float op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vbool8_t test_vmfge_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool8_t test_vmfge_vf_f32m4_b8 (vfloat32m4_t op1, float op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vbool4_t test_vmfge_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vbool4_t test_vmfge_vf_f32m8_b4 (vfloat32m8_t op1, float op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vbool64_t test_vmfge_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vbool64_t test_vmfge_vf_f64m1_b64 (vfloat64m1_t op1, double op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vbool32_t test_vmfge_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vbool32_t test_vmfge_vf_f64m2_b32 (vfloat64m2_t op1, double op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool16_t test_vmfge_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool16_t test_vmfge_vf_f64m4_b16 (vfloat64m4_t op1, double op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vbool8_t test_vmfge_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vbool8_t test_vmfge_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vmfge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vbool64_t test_vmfge_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vbool64_t test_vmfge_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vbool32_t test_vmfge_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vbool32_t test_vmfge_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vbool16_t test_vmfge_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vbool16_t test_vmfge_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vbool8_t test_vmfge_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vbool8_t test_vmfge_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vbool4_t test_vmfge_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vbool4_t test_vmfge_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vbool64_t test_vmfge_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vbool64_t test_vmfge_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vbool32_t test_vmfge_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vbool32_t test_vmfge_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vbool16_t test_vmfge_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vbool16_t test_vmfge_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vbool8_t test_vmfge_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
index 2cf1abcc38ab2..a0bf92a539ef1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmfgt_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vbool64_t test_vmfgt_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vbool32_t test_vmfgt_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vbool32_t test_vmfgt_vf_f32m1_b32 (vfloat32m1_t op1, float op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vbool16_t test_vmfgt_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vbool16_t test_vmfgt_vf_f32m2_b16 (vfloat32m2_t op1, float op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vbool8_t test_vmfgt_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vbool8_t test_vmfgt_vf_f32m4_b8 (vfloat32m4_t op1, float op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vbool4_t test_vmfgt_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vbool4_t test_vmfgt_vf_f32m8_b4 (vfloat32m8_t op1, float op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vbool64_t test_vmfgt_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vbool64_t test_vmfgt_vf_f64m1_b64 (vfloat64m1_t op1, double op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vbool32_t test_vmfgt_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vbool32_t test_vmfgt_vf_f64m2_b32 (vfloat64m2_t op1, double op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool16_t test_vmfgt_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool16_t test_vmfgt_vf_f64m4_b16 (vfloat64m4_t op1, double op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vbool8_t test_vmfgt_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vmfgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vbool64_t test_vmfgt_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vbool32_t test_vmfgt_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vbool32_t test_vmfgt_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vbool16_t test_vmfgt_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vbool16_t test_vmfgt_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vbool8_t test_vmfgt_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vbool8_t test_vmfgt_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vbool4_t test_vmfgt_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vbool4_t test_vmfgt_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vbool64_t test_vmfgt_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vbool64_t test_vmfgt_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vbool32_t test_vmfgt_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vbool32_t test_vmfgt_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vbool16_t test_vmfgt_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vbool16_t test_vmfgt_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
index fe1611340d992..cad932776fe24 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
index 231433d8342b8..955cec4918891 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmflt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmflt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
index 7a14a8efcccb5..890682f03535d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -119,7 +108,6 @@ vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -129,7 +117,6 @@ vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -140,7 +127,6 @@ vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -171,7 +155,6 @@ vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -181,7 +164,6 @@ vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +173,6 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -203,7 +184,6 @@ vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -226,7 +205,6 @@ vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -237,7 +215,6 @@ vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -249,7 +226,6 @@ vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -260,7 +236,6 @@ vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -272,7 +247,6 @@ vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -283,7 +257,6 @@ vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -295,7 +268,6 @@ vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -306,7 +278,6 @@ vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -318,7 +289,6 @@ vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -329,7 +299,6 @@ vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -341,7 +310,6 @@ vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -352,7 +320,6 @@ vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -364,7 +331,6 @@ vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -375,7 +341,6 @@ vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -387,7 +352,6 @@ vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmfne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
index da001e702a9ae..386cbd7bff045 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmin(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vminu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c
index accabfad1fe42..b4d949babf665 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmmv.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmmv_m_b1 (vbool1_t op1, size_t vl) {
   return vmmv(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmmv_m_b2 (vbool2_t op1, size_t vl) {
   return vmmv(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmmv_m_b4 (vbool4_t op1, size_t vl) {
   return vmmv(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmmv_m_b8 (vbool8_t op1, size_t vl) {
   return vmmv(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmmv_m_b16 (vbool16_t op1, size_t vl) {
   return vmmv(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmmv_m_b32 (vbool32_t op1, size_t vl) {
   return vmmv(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmmv_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP1]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
index b1fd9c9df8873..fea9524ec11a1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmnand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmnand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmnand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmnand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmnand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmnand(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnand_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
index 7a3ef869236a4..073d0319a0f4a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c
index 695f5df3073b7..5bfe530f254d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnot.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmnot_m_b1 (vbool1_t op1, size_t vl) {
   return vmnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmnot_m_b2 (vbool2_t op1, size_t vl) {
   return vmnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmnot_m_b4 (vbool4_t op1, size_t vl) {
   return vmnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmnot_m_b8 (vbool8_t op1, size_t vl) {
   return vmnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmnot_m_b16 (vbool16_t op1, size_t vl) {
   return vmnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP1]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmnot_m_b32 (vbool32_t op1, size_t vl) {
   return vmnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmnot_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP1]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
index 11514e1e1496a..202ac20b7fc5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
   return vmor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmornot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmornot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmornot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmornot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmornot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmornot(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmornot_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c
index 8a8e5a2efa1e3..e642a96bc8f91 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -36,7 +33,6 @@ vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -46,7 +42,6 @@ vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -57,7 +52,6 @@ vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -99,7 +90,6 @@ vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -120,7 +109,6 @@ vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -130,7 +118,6 @@ vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -141,7 +128,6 @@ vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -162,7 +147,6 @@ vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +185,6 @@ vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +194,6 @@ vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -225,7 +204,6 @@ vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -236,7 +214,6 @@ vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +223,6 @@ vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +232,6 @@ vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -267,7 +242,6 @@ vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -278,7 +252,6 @@ vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -288,7 +261,6 @@ vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -298,7 +270,6 @@ vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -309,7 +280,6 @@ vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -320,7 +290,6 @@ vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -331,7 +300,6 @@ vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -341,7 +309,6 @@ vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -352,7 +319,6 @@ vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -363,7 +329,6 @@ vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +339,6 @@ vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +348,6 @@ vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -395,7 +358,6 @@ vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -406,7 +368,6 @@ vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -416,7 +377,6 @@ vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -426,7 +386,6 @@ vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -437,7 +396,6 @@ vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -448,7 +406,6 @@ vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +415,6 @@ vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +424,6 @@ vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -479,7 +434,6 @@ vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -490,7 +444,6 @@ vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +453,6 @@ vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +462,6 @@ vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +472,6 @@ vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +482,6 @@ vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -542,7 +491,6 @@ vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -552,7 +500,6 @@ vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -563,7 +510,6 @@ vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -574,7 +520,6 @@ vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -585,7 +530,6 @@ vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +539,6 @@ vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -606,7 +549,6 @@ vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -617,7 +559,6 @@ vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -627,7 +568,6 @@ vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -637,7 +577,6 @@ vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -648,7 +587,6 @@ vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -659,7 +597,6 @@ vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +606,6 @@ vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +615,6 @@ vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -690,7 +625,6 @@ vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -701,7 +635,6 @@ vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -711,7 +644,6 @@ vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -721,7 +653,6 @@ vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -732,7 +663,6 @@ vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -743,7 +673,6 @@ vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -753,7 +682,6 @@ vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -763,7 +691,6 @@ vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +701,6 @@ vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +711,6 @@ vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -795,7 +720,6 @@ vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -805,7 +729,6 @@ vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -816,7 +739,6 @@ vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -827,7 +749,6 @@ vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +758,6 @@ vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +767,6 @@ vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -858,7 +777,6 @@ vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -869,7 +787,6 @@ vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +796,6 @@ vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +805,6 @@ vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -900,7 +815,6 @@ vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -911,7 +825,6 @@ vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +834,6 @@ vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +843,6 @@ vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -942,7 +853,6 @@ vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -953,7 +863,6 @@ vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -963,7 +872,6 @@ vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -973,7 +881,6 @@ vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -984,7 +891,6 @@ vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -995,7 +901,6 @@ vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +910,6 @@ vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1015,7 +919,6 @@ vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1026,7 +929,6 @@ vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1037,7 +939,6 @@ vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1047,7 +948,6 @@ vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +957,6 @@ vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1068,7 +967,6 @@ vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +977,6 @@ vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1089,7 +986,6 @@ vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +995,6 @@ vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1005,6 @@ vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1015,6 @@ vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1131,7 +1024,6 @@ vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1033,6 @@ vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1043,6 @@ vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1163,7 +1053,6 @@ vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1173,7 +1062,6 @@ vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1183,7 +1071,6 @@ vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1081,6 @@ vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1205,7 +1091,6 @@ vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1215,7 +1100,6 @@ vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1109,6 @@ vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1119,6 @@ vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1129,6 @@ vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1258,7 +1139,6 @@ vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1268,7 +1148,6 @@ vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1279,7 +1158,6 @@ vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1290,7 +1168,6 @@ vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1301,7 +1178,6 @@ vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1187,6 @@ vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1197,6 @@ vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1207,6 @@ vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1343,7 +1216,6 @@ vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1225,6 @@ vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1235,6 @@ vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1245,6 @@ vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1385,7 +1254,6 @@ vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1395,7 +1263,6 @@ vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1406,7 +1273,6 @@ vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1417,7 +1283,6 @@ vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1427,7 +1292,6 @@ vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1437,7 +1301,6 @@ vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1448,7 +1311,6 @@ vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1459,7 +1321,6 @@ vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1469,7 +1330,6 @@ vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1339,6 @@ vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1349,6 @@ vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1359,6 @@ vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1369,6 @@ vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1522,7 +1378,6 @@ vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1533,7 +1388,6 @@ vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1398,6 @@ vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1407,6 @@ vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1416,6 @@ vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1575,7 +1426,6 @@ vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1586,7 +1436,6 @@ vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1596,7 +1445,6 @@ vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1454,6 @@ vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1617,7 +1464,6 @@ vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1628,7 +1474,6 @@ vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1648,7 +1492,6 @@ vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1659,7 +1502,6 @@ vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1670,7 +1512,6 @@ vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1680,7 +1521,6 @@ vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1690,7 +1530,6 @@ vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1701,7 +1540,6 @@ vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1712,7 +1550,6 @@ vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1722,7 +1559,6 @@ vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1732,7 +1568,6 @@ vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1743,7 +1578,6 @@ vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1588,6 @@ vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1597,6 @@ vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1606,6 @@ vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1616,6 @@ vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1796,7 +1626,6 @@ vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1806,7 +1635,6 @@ vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1816,7 +1644,6 @@ vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1827,7 +1654,6 @@ vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -1838,7 +1664,6 @@ vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
   return vmsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1848,7 +1673,6 @@ vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsbc(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
index 551b0d8cefb8a..7270fd8b4fc56 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -60,7 +53,6 @@ vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +63,6 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
   return vmsbf(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +73,6 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
   return vmsbf(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +83,6 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
   return vmsbf(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +93,6 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
   return vmsbf(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +103,6 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
   return vmsbf(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -126,7 +113,6 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
   return vmsbf(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
index f0b4841b5e9d6..a20b73278dedd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -457,7 +412,6 @@ vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -467,7 +421,6 @@ vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -477,7 +430,6 @@ vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -487,7 +439,6 @@ vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -497,7 +448,6 @@ vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -507,7 +457,6 @@ vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -517,7 +466,6 @@ vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -527,7 +475,6 @@ vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -537,7 +484,6 @@ vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -547,7 +493,6 @@ vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -557,7 +502,6 @@ vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -567,7 +511,6 @@ vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -577,7 +520,6 @@ vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -587,7 +529,6 @@ vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -598,7 +539,6 @@ vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -608,7 +548,6 @@ vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -619,7 +558,6 @@ vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -629,7 +567,6 @@ vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -639,7 +576,6 @@ vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -649,7 +585,6 @@ vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -659,7 +594,6 @@ vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +603,6 @@ vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +612,6 @@ vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -689,7 +621,6 @@ vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -699,7 +630,6 @@ vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -709,7 +639,6 @@ vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -720,7 +649,6 @@ vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -730,7 +658,6 @@ vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -740,7 +667,6 @@ vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -750,7 +676,6 @@ vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -760,7 +685,6 @@ vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -770,7 +694,6 @@ vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -780,7 +703,6 @@ vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -790,7 +712,6 @@ vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -800,7 +721,6 @@ vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -810,7 +730,6 @@ vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -820,7 +739,6 @@ vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -830,7 +748,6 @@ vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -840,7 +757,6 @@ vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -850,7 +766,6 @@ vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -860,7 +775,6 @@ vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -870,7 +784,6 @@ vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmseq(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -912,7 +822,6 @@ vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -923,7 +832,6 @@ vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +842,6 @@ vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -945,7 +852,6 @@ vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +862,6 @@ vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -967,7 +872,6 @@ vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -978,7 +882,6 @@ vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -989,7 +892,6 @@ vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1000,7 +902,6 @@ vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +912,6 @@ vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1022,7 +922,6 @@ vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1033,7 +932,6 @@ vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +942,6 @@ vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1056,7 +953,6 @@ vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1067,7 +963,6 @@ vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +974,6 @@ vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1090,7 +984,6 @@ vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1101,7 +994,6 @@ vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1004,6 @@ vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1123,7 +1014,6 @@ vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1024,6 @@ vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1145,7 +1034,6 @@ vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1044,6 @@ vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1167,7 +1054,6 @@ vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1178,7 +1064,6 @@ vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1190,7 +1075,6 @@ vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1201,7 +1085,6 @@ vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1212,7 +1095,6 @@ vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1223,7 +1105,6 @@ vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1115,6 @@ vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1125,6 @@ vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1135,6 @@ vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1267,7 +1145,6 @@ vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1278,7 +1155,6 @@ vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1289,7 +1165,6 @@ vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1300,7 +1175,6 @@ vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1185,6 @@ vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1195,6 @@ vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1205,6 @@ vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1215,6 @@ vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1225,6 @@ vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1235,6 @@ vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1377,7 +1245,6 @@ vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1389,7 +1256,6 @@ vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1400,7 +1266,6 @@ vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1412,7 +1277,6 @@ vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1423,7 +1287,6 @@ vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1298,6 @@ vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1308,6 @@ vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1457,7 +1318,6 @@ vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1468,7 +1328,6 @@ vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1338,6 @@ vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1348,6 @@ vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1358,6 @@ vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1368,6 @@ vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1523,7 +1378,6 @@ vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1388,6 @@ vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1546,7 +1399,6 @@ vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1558,7 +1410,6 @@ vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1570,7 +1421,6 @@ vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1582,7 +1432,6 @@ vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1443,6 @@ vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1605,7 +1453,6 @@ vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1616,7 +1463,6 @@ vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1627,7 +1473,6 @@ vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1493,6 @@ vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1752,7 +1587,6 @@ vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1763,7 +1597,6 @@ vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1607,6 @@ vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1617,6 @@ vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1797,7 +1628,6 @@ vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1808,7 +1638,6 @@ vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1820,7 +1649,6 @@ vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1831,7 +1659,6 @@ vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1843,7 +1670,6 @@ vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1854,7 +1680,6 @@ vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1865,7 +1690,6 @@ vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmseq(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
index 4a990e8f4dded..3f7f4449cbeea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsge_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsge_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsge_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsge_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsge_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsge_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsge_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsge_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsge_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsge_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsge_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsge_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsge_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsge_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vbool64_t test_vmsge_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl)
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vbool64_t test_vmsge_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vbool32_t test_vmsge_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl)
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vbool32_t test_vmsge_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vbool16_t test_vmsge_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vbool16_t test_vmsge_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vbool8_t test_vmsge_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vbool8_t test_vmsge_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vbool4_t test_vmsge_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vbool4_t test_vmsge_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vbool2_t test_vmsge_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vbool2_t test_vmsge_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vbool64_t test_vmsge_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl)
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vbool64_t test_vmsge_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vbool32_t test_vmsge_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vbool32_t test_vmsge_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vbool16_t test_vmsge_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vbool16_t test_vmsge_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vbool8_t test_vmsge_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vbool8_t test_vmsge_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vbool4_t test_vmsge_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vbool4_t test_vmsge_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vbool64_t test_vmsge_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vbool64_t test_vmsge_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vbool32_t test_vmsge_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vbool32_t test_vmsge_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vbool16_t test_vmsge_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vbool16_t test_vmsge_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vbool8_t test_vmsge_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vbool8_t test_vmsge_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsge(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vbool8_t test_vmsgeu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vbool8_t test_vmsgeu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vbool4_t test_vmsgeu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vbool4_t test_vmsgeu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vbool2_t test_vmsgeu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vbool2_t test_vmsgeu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vbool1_t test_vmsgeu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vbool1_t test_vmsgeu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vbool16_t test_vmsgeu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vbool16_t test_vmsgeu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vbool8_t test_vmsgeu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vbool8_t test_vmsgeu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vbool4_t test_vmsgeu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vbool4_t test_vmsgeu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vbool2_t test_vmsgeu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vbool2_t test_vmsgeu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vbool32_t test_vmsgeu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vbool32_t test_vmsgeu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vbool16_t test_vmsgeu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vbool16_t test_vmsgeu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vbool8_t test_vmsgeu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vbool8_t test_vmsgeu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vbool4_t test_vmsgeu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vbool4_t test_vmsgeu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vbool64_t test_vmsgeu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vbool64_t test_vmsgeu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vbool32_t test_vmsgeu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vbool32_t test_vmsgeu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vbool16_t test_vmsgeu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl)
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vbool16_t test_vmsgeu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vbool8_t test_vmsgeu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsgeu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vbool64_t test_vmsge_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vbool64_t test_vmsge_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vbool32_t test_vmsge_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vbool32_t test_vmsge_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vbool16_t test_vmsge_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vbool16_t test_vmsge_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vbool8_t test_vmsge_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vbool8_t test_vmsge_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vbool4_t test_vmsge_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vbool4_t test_vmsge_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vbool2_t test_vmsge_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vbool2_t test_vmsge_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vbool1_t test_vmsge_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsge.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vbool1_t test_vmsge_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vbool64_t test_vmsge_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vbool64_t test_vmsge_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vbool32_t test_vmsge_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vbool32_t test_vmsge_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vbool16_t test_vmsge_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vbool16_t test_vmsge_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vbool8_t test_vmsge_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vbool8_t test_vmsge_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vbool4_t test_vmsge_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vbool4_t test_vmsge_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vbool2_t test_vmsge_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vbool2_t test_vmsge_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vbool64_t test_vmsge_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vbool64_t test_vmsge_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vbool32_t test_vmsge_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vbool32_t test_vmsge_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vbool16_t test_vmsge_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vbool16_t test_vmsge_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vbool8_t test_vmsge_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vbool8_t test_vmsge_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vbool4_t test_vmsge_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vbool4_t test_vmsge_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vbool64_t test_vmsge_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vbool64_t test_vmsge_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vbool32_t test_vmsge_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vbool32_t test_vmsge_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vbool16_t test_vmsge_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vbool16_t test_vmsge_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vbool8_t test_vmsge_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vbool8_t test_vmsge_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsge(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vbool4_t test_vmsgeu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vbool64_t test_vmsgeu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
   return vmsgeu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
index 634db12e88ca0..6cba226fa9a4b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsgt_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsgt_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsgt_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsgt_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsgt_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsgt_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsgt_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsgt_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsgt_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsgt_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsgt_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsgt_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsgt_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsgt_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vbool64_t test_vmsgt_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl)
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vbool64_t test_vmsgt_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vbool32_t test_vmsgt_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl)
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vbool32_t test_vmsgt_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vbool16_t test_vmsgt_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vbool16_t test_vmsgt_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vbool8_t test_vmsgt_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vbool8_t test_vmsgt_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vbool4_t test_vmsgt_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vbool4_t test_vmsgt_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vbool2_t test_vmsgt_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vbool2_t test_vmsgt_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vbool64_t test_vmsgt_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl)
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vbool64_t test_vmsgt_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vbool32_t test_vmsgt_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vbool32_t test_vmsgt_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vbool16_t test_vmsgt_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vbool16_t test_vmsgt_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vbool8_t test_vmsgt_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vbool8_t test_vmsgt_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vbool4_t test_vmsgt_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vbool4_t test_vmsgt_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vbool64_t test_vmsgt_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vbool64_t test_vmsgt_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vbool32_t test_vmsgt_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vbool32_t test_vmsgt_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vbool16_t test_vmsgt_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vbool16_t test_vmsgt_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vbool8_t test_vmsgt_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsgt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vbool8_t test_vmsgtu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vbool8_t test_vmsgtu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vbool4_t test_vmsgtu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vbool4_t test_vmsgtu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vbool2_t test_vmsgtu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vbool2_t test_vmsgtu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vbool1_t test_vmsgtu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vbool1_t test_vmsgtu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vbool16_t test_vmsgtu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vbool16_t test_vmsgtu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vbool8_t test_vmsgtu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vbool8_t test_vmsgtu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vbool4_t test_vmsgtu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vbool4_t test_vmsgtu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vbool2_t test_vmsgtu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vbool2_t test_vmsgtu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vbool32_t test_vmsgtu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vbool32_t test_vmsgtu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vbool16_t test_vmsgtu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vbool16_t test_vmsgtu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vbool8_t test_vmsgtu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vbool8_t test_vmsgtu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vbool4_t test_vmsgtu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vbool4_t test_vmsgtu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vbool64_t test_vmsgtu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vbool64_t test_vmsgtu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vbool32_t test_vmsgtu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vbool32_t test_vmsgtu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vbool16_t test_vmsgtu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl)
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vbool16_t test_vmsgtu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vbool8_t test_vmsgtu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsgtu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vbool64_t test_vmsgt_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vbool8_t test_vmsgt_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vbool8_t test_vmsgt_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vbool4_t test_vmsgt_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vbool4_t test_vmsgt_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vbool2_t test_vmsgt_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vbool2_t test_vmsgt_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vbool1_t test_vmsgt_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vbool1_t test_vmsgt_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vbool16_t test_vmsgt_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vbool16_t test_vmsgt_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vbool8_t test_vmsgt_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vbool8_t test_vmsgt_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vbool4_t test_vmsgt_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vbool4_t test_vmsgt_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vbool2_t test_vmsgt_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vbool2_t test_vmsgt_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vbool32_t test_vmsgt_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vbool32_t test_vmsgt_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vbool16_t test_vmsgt_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vbool16_t test_vmsgt_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vbool8_t test_vmsgt_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vbool8_t test_vmsgt_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vbool4_t test_vmsgt_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vbool4_t test_vmsgt_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vbool64_t test_vmsgt_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vbool64_t test_vmsgt_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vbool32_t test_vmsgt_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vbool32_t test_vmsgt_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vbool16_t test_vmsgt_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vbool16_t test_vmsgt_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vbool8_t test_vmsgt_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
   return vmsgt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vbool2_t test_vmsgtu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
   return vmsgtu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
index d655d422539b3..cade3990280db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { return vmsif(op1, vl); }
 //
 vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { return vmsif(op1, vl); }
 //
 vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { return vmsif(op1, vl); }
 //
 vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { return vmsif(op1, vl); }
 //
 vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { return vmsif(op1, vl); }
 //
 vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -60,7 +53,6 @@ vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { return vmsif(op1, vl); }
 //
 vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +63,6 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
   return vmsif(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +73,6 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
   return vmsif(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +83,6 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
   return vmsif(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +93,6 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
   return vmsif(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +103,6 @@ vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
   return vmsif(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -126,7 +113,6 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
   return vmsif(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
index 12b66377b6893..39fb95f7dc8cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsle(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +413,6 @@ vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +422,6 @@ vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -479,7 +432,6 @@ vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -489,7 +441,6 @@ vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +451,6 @@ vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +460,6 @@ vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -520,7 +469,6 @@ vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -530,7 +478,6 @@ vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -540,7 +487,6 @@ vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -550,7 +496,6 @@ vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -560,7 +505,6 @@ vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -570,7 +514,6 @@ vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -580,7 +523,6 @@ vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -590,7 +532,6 @@ vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -601,7 +542,6 @@ vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -611,7 +551,6 @@ vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -622,7 +561,6 @@ vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -632,7 +570,6 @@ vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -643,7 +580,6 @@ vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -653,7 +589,6 @@ vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -663,7 +598,6 @@ vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -673,7 +607,6 @@ vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -683,7 +616,6 @@ vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -693,7 +625,6 @@ vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -703,7 +634,6 @@ vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -713,7 +643,6 @@ vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +653,6 @@ vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +662,6 @@ vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -745,7 +672,6 @@ vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -755,7 +681,6 @@ vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -766,7 +691,6 @@ vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -776,7 +700,6 @@ vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -786,7 +709,6 @@ vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -796,7 +718,6 @@ vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -806,7 +727,6 @@ vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -816,7 +736,6 @@ vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +746,6 @@ vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +755,6 @@ vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +765,6 @@ vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -858,7 +774,6 @@ vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +784,6 @@ vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +793,6 @@ vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +802,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -899,7 +811,6 @@ vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsleu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -910,7 +821,6 @@ vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -921,7 +831,6 @@ vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +841,6 @@ vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -943,7 +851,6 @@ vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +861,6 @@ vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -965,7 +871,6 @@ vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +881,6 @@ vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +891,6 @@ vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -998,7 +901,6 @@ vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +911,6 @@ vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +921,6 @@ vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +931,6 @@ vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1042,7 +941,6 @@ vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +951,6 @@ vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +962,6 @@ vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +972,6 @@ vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1088,7 +983,6 @@ vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +993,6 @@ vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1003,6 @@ vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1013,6 @@ vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1023,6 @@ vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1143,7 +1033,6 @@ vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1043,6 @@ vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1053,6 @@ vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1176,7 +1063,6 @@ vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1073,6 @@ vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1199,7 +1084,6 @@ vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1210,7 +1094,6 @@ vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1104,6 @@ vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1232,7 +1114,6 @@ vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1243,7 +1124,6 @@ vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1134,6 @@ vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1144,6 @@ vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1154,6 @@ vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1287,7 +1164,6 @@ vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1174,6 @@ vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1309,7 +1184,6 @@ vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1320,7 +1194,6 @@ vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1331,7 +1204,6 @@ vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1342,7 +1214,6 @@ vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1224,6 @@ vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1234,6 @@ vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1244,6 @@ vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1254,6 @@ vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsle(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1398,7 +1265,6 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1275,6 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1421,7 +1286,6 @@ vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1296,6 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1307,6 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1455,7 +1317,6 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1466,7 +1327,6 @@ vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1477,7 +1337,6 @@ vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1488,7 +1347,6 @@ vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1499,7 +1357,6 @@ vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1510,7 +1367,6 @@ vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1521,7 +1377,6 @@ vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1532,7 +1387,6 @@ vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1397,6 @@ vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1408,6 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1419,6 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1430,6 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1441,6 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1603,7 +1452,6 @@ vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1462,6 @@ vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1626,7 +1473,6 @@ vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1483,6 @@ vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1494,6 @@ vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1504,6 @@ vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1515,6 @@ vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1525,6 @@ vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1536,6 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1547,6 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1719,7 +1558,6 @@ vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1568,6 @@ vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1742,7 +1579,6 @@ vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1589,6 @@ vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1765,7 +1600,6 @@ vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1610,6 @@ vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1788,7 +1621,6 @@ vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1631,6 @@ vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1811,7 +1642,6 @@ vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1652,6 @@ vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1834,7 +1663,6 @@ vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1673,6 @@ vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1857,7 +1684,6 @@ vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1694,6 @@ vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1880,7 +1705,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsleu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
index 07b864c76ae3d..0ebee8ee46cc7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmslt(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -458,7 +413,6 @@ vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -468,7 +422,6 @@ vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -479,7 +432,6 @@ vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -489,7 +441,6 @@ vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -500,7 +451,6 @@ vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +460,6 @@ vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -520,7 +469,6 @@ vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -530,7 +478,6 @@ vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -540,7 +487,6 @@ vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -550,7 +496,6 @@ vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -560,7 +505,6 @@ vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -570,7 +514,6 @@ vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -580,7 +523,6 @@ vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -590,7 +532,6 @@ vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -601,7 +542,6 @@ vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -611,7 +551,6 @@ vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -622,7 +561,6 @@ vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -632,7 +570,6 @@ vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -643,7 +580,6 @@ vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -653,7 +589,6 @@ vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -663,7 +598,6 @@ vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -673,7 +607,6 @@ vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -683,7 +616,6 @@ vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -693,7 +625,6 @@ vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -703,7 +634,6 @@ vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -713,7 +643,6 @@ vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +653,6 @@ vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +662,6 @@ vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -745,7 +672,6 @@ vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -755,7 +681,6 @@ vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -766,7 +691,6 @@ vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -776,7 +700,6 @@ vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -786,7 +709,6 @@ vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -796,7 +718,6 @@ vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -806,7 +727,6 @@ vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -816,7 +736,6 @@ vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +746,6 @@ vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +755,6 @@ vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +765,6 @@ vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -858,7 +774,6 @@ vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +784,6 @@ vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -879,7 +793,6 @@ vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -889,7 +802,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -899,7 +811,6 @@ vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsltu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -910,7 +821,6 @@ vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -921,7 +831,6 @@ vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +841,6 @@ vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -943,7 +851,6 @@ vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +861,6 @@ vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -965,7 +871,6 @@ vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +881,6 @@ vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -987,7 +891,6 @@ vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -998,7 +901,6 @@ vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +911,6 @@ vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +921,6 @@ vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +931,6 @@ vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1042,7 +941,6 @@ vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1053,7 +951,6 @@ vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1065,7 +962,6 @@ vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1076,7 +972,6 @@ vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1088,7 +983,6 @@ vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1099,7 +993,6 @@ vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1110,7 +1003,6 @@ vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1013,6 @@ vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1023,6 @@ vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1143,7 +1033,6 @@ vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1043,6 @@ vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1053,6 @@ vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1176,7 +1063,6 @@ vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1187,7 +1073,6 @@ vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1199,7 +1084,6 @@ vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1210,7 +1094,6 @@ vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1104,6 @@ vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1232,7 +1114,6 @@ vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1243,7 +1124,6 @@ vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1134,6 @@ vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1144,6 @@ vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1154,6 @@ vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1287,7 +1164,6 @@ vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1174,6 @@ vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1309,7 +1184,6 @@ vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1320,7 +1194,6 @@ vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1331,7 +1204,6 @@ vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1342,7 +1214,6 @@ vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1224,6 @@ vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1234,6 @@ vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1244,6 @@ vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1254,6 @@ vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmslt(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1398,7 +1265,6 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1409,7 +1275,6 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1421,7 +1286,6 @@ vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1432,7 +1296,6 @@ vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1307,6 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1455,7 +1317,6 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1466,7 +1327,6 @@ vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1477,7 +1337,6 @@ vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1488,7 +1347,6 @@ vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1499,7 +1357,6 @@ vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1510,7 +1367,6 @@ vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1521,7 +1377,6 @@ vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1532,7 +1387,6 @@ vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1543,7 +1397,6 @@ vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1555,7 +1408,6 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1567,7 +1419,6 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1579,7 +1430,6 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1591,7 +1441,6 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1603,7 +1452,6 @@ vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1462,6 @@ vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1626,7 +1473,6 @@ vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1483,6 @@ vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1494,6 @@ vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1504,6 @@ vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1515,6 @@ vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1525,6 @@ vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1536,6 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1547,6 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1719,7 +1558,6 @@ vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1568,6 @@ vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1742,7 +1579,6 @@ vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1589,6 @@ vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1765,7 +1600,6 @@ vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1610,6 @@ vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1788,7 +1621,6 @@ vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1631,6 @@ vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1811,7 +1642,6 @@ vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1652,6 @@ vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1834,7 +1663,6 @@ vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1673,6 @@ vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1857,7 +1684,6 @@ vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1694,6 @@ vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1880,7 +1705,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsltu(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
index 61b2d97a957ca..9916376f7bb30 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -447,7 +403,6 @@ vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -457,7 +412,6 @@ vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -467,7 +421,6 @@ vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -477,7 +430,6 @@ vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -487,7 +439,6 @@ vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -497,7 +448,6 @@ vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -507,7 +457,6 @@ vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -517,7 +466,6 @@ vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -527,7 +475,6 @@ vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -537,7 +484,6 @@ vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -547,7 +493,6 @@ vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -557,7 +502,6 @@ vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -567,7 +511,6 @@ vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -577,7 +520,6 @@ vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -587,7 +529,6 @@ vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -598,7 +539,6 @@ vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -608,7 +548,6 @@ vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -619,7 +558,6 @@ vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -629,7 +567,6 @@ vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -639,7 +576,6 @@ vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -649,7 +585,6 @@ vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -659,7 +594,6 @@ vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -669,7 +603,6 @@ vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -679,7 +612,6 @@ vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -689,7 +621,6 @@ vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -699,7 +630,6 @@ vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -709,7 +639,6 @@ vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -720,7 +649,6 @@ vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -730,7 +658,6 @@ vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -740,7 +667,6 @@ vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -750,7 +676,6 @@ vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -760,7 +685,6 @@ vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -770,7 +694,6 @@ vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -780,7 +703,6 @@ vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -790,7 +712,6 @@ vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -800,7 +721,6 @@ vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -810,7 +730,6 @@ vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -820,7 +739,6 @@ vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -830,7 +748,6 @@ vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -840,7 +757,6 @@ vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -850,7 +766,6 @@ vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -860,7 +775,6 @@ vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -870,7 +784,6 @@ vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmsne(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -912,7 +822,6 @@ vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -923,7 +832,6 @@ vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +842,6 @@ vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -945,7 +852,6 @@ vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +862,6 @@ vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -967,7 +872,6 @@ vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -978,7 +882,6 @@ vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -989,7 +892,6 @@ vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1000,7 +902,6 @@ vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +912,6 @@ vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1022,7 +922,6 @@ vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1033,7 +932,6 @@ vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +942,6 @@ vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1056,7 +953,6 @@ vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1067,7 +963,6 @@ vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1079,7 +974,6 @@ vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1090,7 +984,6 @@ vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1101,7 +994,6 @@ vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1004,6 @@ vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1123,7 +1014,6 @@ vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1024,6 @@ vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1145,7 +1034,6 @@ vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1044,6 @@ vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1167,7 +1054,6 @@ vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1178,7 +1064,6 @@ vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1190,7 +1075,6 @@ vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1201,7 +1085,6 @@ vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1212,7 +1095,6 @@ vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1223,7 +1105,6 @@ vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1115,6 @@ vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1245,7 +1125,6 @@ vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1256,7 +1135,6 @@ vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1267,7 +1145,6 @@ vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1278,7 +1155,6 @@ vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1289,7 +1165,6 @@ vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1300,7 +1175,6 @@ vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1311,7 +1185,6 @@ vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1322,7 +1195,6 @@ vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1333,7 +1205,6 @@ vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1215,6 @@ vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1355,7 +1225,6 @@ vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1366,7 +1235,6 @@ vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1377,7 +1245,6 @@ vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1389,7 +1256,6 @@ vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1400,7 +1266,6 @@ vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1412,7 +1277,6 @@ vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1423,7 +1287,6 @@ vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1298,6 @@ vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1308,6 @@ vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1457,7 +1318,6 @@ vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1468,7 +1328,6 @@ vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1338,6 @@ vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1348,6 @@ vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1501,7 +1358,6 @@ vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1512,7 +1368,6 @@ vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1523,7 +1378,6 @@ vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1388,6 @@ vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1546,7 +1399,6 @@ vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1558,7 +1410,6 @@ vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1570,7 +1421,6 @@ vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1582,7 +1432,6 @@ vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1443,6 @@ vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1605,7 +1453,6 @@ vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1616,7 +1463,6 @@ vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1627,7 +1473,6 @@ vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1638,7 +1483,6 @@ vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1493,6 @@ vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1660,7 +1503,6 @@ vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1671,7 +1513,6 @@ vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1683,7 +1524,6 @@ vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1535,6 @@ vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1546,6 @@ vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1556,6 @@ vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1567,6 @@ vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1577,6 @@ vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1752,7 +1587,6 @@ vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1763,7 +1597,6 @@ vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1607,6 @@ vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1785,7 +1617,6 @@ vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1797,7 +1628,6 @@ vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1808,7 +1638,6 @@ vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1820,7 +1649,6 @@ vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1831,7 +1659,6 @@ vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1843,7 +1670,6 @@ vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1854,7 +1680,6 @@ vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1865,7 +1690,6 @@ vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
   return vmsne(mask, maskedoff, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
index 03cec0e38fec0..ed3ef05a7e3b6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -12,7 +11,6 @@
 //
 vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -20,7 +18,6 @@ vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { return vmsof(op1, vl); }
 //
 vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -28,7 +25,6 @@ vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { return vmsof(op1, vl); }
 //
 vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -36,7 +32,6 @@ vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { return vmsof(op1, vl); }
 //
 vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +39,6 @@ vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { return vmsof(op1, vl); }
 //
 vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -52,7 +46,6 @@ vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { return vmsof(op1, vl); }
 //
 vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -60,7 +53,6 @@ vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { return vmsof(op1, vl); }
 //
 vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -71,7 +63,6 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
   return vmsof(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -82,7 +73,6 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
   return vmsof(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -93,7 +83,6 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
   return vmsof(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +93,6 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
   return vmsof(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +103,6 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
   return vmsof(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -126,7 +113,6 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
   return vmsof(mask, maskedoff, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
index 129b8f5b3518d..7d0e68918484a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vmulh(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1588,6 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmulhu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1774,7 +1597,6 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1784,7 +1606,6 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1794,7 +1615,6 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1804,7 +1624,6 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1814,7 +1633,6 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1824,7 +1642,6 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1834,7 +1651,6 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1844,7 +1660,6 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1854,7 +1669,6 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1864,7 +1678,6 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1874,7 +1687,6 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1884,7 +1696,6 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1894,7 +1705,6 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1904,7 +1714,6 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1914,7 +1723,6 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1924,7 +1732,6 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1934,7 +1741,6 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1944,7 +1750,6 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1954,7 +1759,6 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1964,7 +1768,6 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1974,7 +1777,6 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1984,7 +1786,6 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1994,7 +1795,6 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2004,7 +1804,6 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2014,7 +1813,6 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2024,7 +1822,6 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2034,7 +1831,6 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2044,7 +1840,6 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2054,7 +1849,6 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2064,7 +1858,6 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2074,7 +1867,6 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2084,7 +1876,6 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2094,7 +1885,6 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2104,7 +1894,6 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2114,7 +1903,6 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2124,7 +1912,6 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2134,7 +1921,6 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2144,7 +1930,6 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2154,7 +1939,6 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2164,7 +1948,6 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2174,7 +1957,6 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2184,7 +1966,6 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -2194,7 +1975,6 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vmulhsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
index 696775adc14d9..ed00804c40cf9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -43,7 +39,6 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
 //
 vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { return vmv_v(src, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -51,7 +46,6 @@ vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { return vmv_v(src, vl); }
 //
 vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { return vmv_v(src, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -59,7 +53,6 @@ vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { return vmv_v(src, vl); }
 //
 vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { return vmv_v(src, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -67,7 +60,6 @@ vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { return vmv_v(src, vl); }
 //
 vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { return vmv_v(src, vl); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -77,7 +69,6 @@ vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -87,7 +78,6 @@ vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -97,7 +87,6 @@ vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -107,7 +96,6 @@ vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -117,7 +105,6 @@ vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -127,7 +114,6 @@ vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -137,7 +123,6 @@ vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -147,7 +132,6 @@ vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -157,7 +141,6 @@ vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -167,7 +150,6 @@ vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -177,7 +159,6 @@ vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -187,7 +168,6 @@ vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -197,7 +177,6 @@ vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -207,7 +186,6 @@ vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -217,7 +195,6 @@ vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -227,7 +204,6 @@ vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -237,7 +213,6 @@ vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -247,7 +222,6 @@ vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -257,7 +231,6 @@ vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -267,7 +240,6 @@ vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -277,7 +249,6 @@ vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -287,7 +258,6 @@ vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -297,7 +267,6 @@ vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -307,7 +276,6 @@ vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -317,7 +285,6 @@ vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -327,7 +294,6 @@ vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -337,7 +303,6 @@ vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -347,7 +312,6 @@ vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -357,7 +321,6 @@ vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -367,7 +330,6 @@ vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -377,7 +339,6 @@ vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -387,7 +348,6 @@ vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -397,7 +357,6 @@ vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -407,7 +366,6 @@ vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -417,7 +375,6 @@ vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -427,7 +384,6 @@ vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -437,7 +393,6 @@ vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -447,7 +402,6 @@ vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -457,7 +411,6 @@ vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -467,7 +420,6 @@ vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -477,7 +429,6 @@ vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -487,7 +438,6 @@ vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -497,7 +447,6 @@ vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -507,7 +456,6 @@ vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -517,7 +465,6 @@ vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -527,7 +474,6 @@ vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf8_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
@@ -535,7 +481,6 @@ vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -545,7 +490,6 @@ vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
@@ -553,7 +497,6 @@ vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -563,7 +506,6 @@ vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
@@ -571,7 +513,6 @@ vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -581,7 +522,6 @@ vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
@@ -589,7 +529,6 @@ vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -599,7 +538,6 @@ vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
@@ -607,7 +545,6 @@ vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -617,7 +554,6 @@ vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
@@ -625,7 +561,6 @@ vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -635,7 +570,6 @@ vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
@@ -643,7 +577,6 @@ vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
 //
 int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -653,7 +586,6 @@ vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
@@ -661,7 +593,6 @@ vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
 //
 int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -671,7 +602,6 @@ vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
@@ -679,7 +609,6 @@ vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
 //
 int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -689,7 +618,6 @@ vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
@@ -697,7 +625,6 @@ vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
 //
 int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -707,7 +634,6 @@ vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
@@ -715,7 +641,6 @@ vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
 //
 int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -725,7 +650,6 @@ vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
@@ -733,7 +657,6 @@ vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
 //
 int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -743,7 +666,6 @@ vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
@@ -751,7 +673,6 @@ vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
 //
 int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -761,7 +682,6 @@ vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
@@ -769,7 +689,6 @@ vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
 //
 int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -779,7 +698,6 @@ vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
@@ -787,7 +705,6 @@ vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
 //
 int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -797,7 +714,6 @@ vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
@@ -805,7 +721,6 @@ vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
 //
 int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -815,7 +730,6 @@ vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
@@ -823,7 +737,6 @@ vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
 //
 int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -833,7 +746,6 @@ vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
@@ -841,7 +753,6 @@ vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
 //
 int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -851,7 +762,6 @@ vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
@@ -859,7 +769,6 @@ vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
 //
 int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -869,7 +778,6 @@ vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
@@ -877,7 +785,6 @@ vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) {
 //
 int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -887,7 +794,6 @@ vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
@@ -895,7 +801,6 @@ vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) {
 //
 int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -905,7 +810,6 @@ vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
@@ -913,7 +817,6 @@ vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) {
 //
 int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -923,7 +826,6 @@ vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
@@ -931,7 +833,6 @@ vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -941,7 +842,6 @@ vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
@@ -949,7 +849,6 @@ vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -959,7 +858,6 @@ vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
@@ -967,7 +865,6 @@ vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -977,7 +874,6 @@ vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
@@ -985,7 +881,6 @@ vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -995,7 +890,6 @@ vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
@@ -1003,7 +897,6 @@ vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1013,7 +906,6 @@ vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
@@ -1021,7 +913,6 @@ vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +922,6 @@ vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
@@ -1039,7 +929,6 @@ vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) {
 //
 uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1049,7 +938,6 @@ vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
@@ -1057,7 +945,6 @@ vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) {
 //
 uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1067,7 +954,6 @@ vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
@@ -1075,7 +961,6 @@ vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) {
 //
 uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1085,7 +970,6 @@ vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
@@ -1093,7 +977,6 @@ vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) {
 //
 uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1103,7 +986,6 @@ vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
@@ -1111,7 +993,6 @@ vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) {
 //
 uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1121,7 +1002,6 @@ vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
@@ -1129,7 +1009,6 @@ vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) {
 //
 uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1139,7 +1018,6 @@ vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
@@ -1147,7 +1025,6 @@ vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) {
 //
 uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1157,7 +1034,6 @@ vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
@@ -1165,7 +1041,6 @@ vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) {
 //
 uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1175,7 +1050,6 @@ vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
@@ -1183,7 +1057,6 @@ vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) {
 //
 uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1193,7 +1066,6 @@ vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
@@ -1201,7 +1073,6 @@ vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) {
 //
 uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1211,7 +1082,6 @@ vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
@@ -1219,7 +1089,6 @@ vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) {
 //
 uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1229,7 +1098,6 @@ vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
@@ -1237,7 +1105,6 @@ vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) {
 //
 uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1114,6 @@ vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
@@ -1255,7 +1121,6 @@ vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) {
 //
 uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1130,6 @@ vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
@@ -1273,7 +1137,6 @@ vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) {
 //
 uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1283,7 +1146,6 @@ vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
@@ -1291,7 +1153,6 @@ vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) {
 //
 uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
@@ -1301,7 +1162,6 @@ vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) {
   return vmv_s(dst, src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
@@ -1309,7 +1169,6 @@ vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) {
 //
 uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { return vmv_x(src); }
 
-//
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
index 378c7091b09f7..886d1874a6f84 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmxnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmxnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmxnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmxnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmxnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmxnor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
index a33292a33913b..89403a714b779 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
   return vmxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
   return vmxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
   return vmxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
   return vmxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
   return vmxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
   return vmxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmxor_mm_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
index 19fb973b2127d..a6ccce0da14aa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -156,7 +141,6 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -166,7 +150,6 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -237,7 +214,6 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -247,7 +223,6 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -257,7 +232,6 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -267,7 +241,6 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
   return vnclip(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -318,7 +287,6 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -328,7 +296,6 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -339,7 +306,6 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -349,7 +315,6 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -360,7 +325,6 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -370,7 +334,6 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -380,7 +343,6 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -390,7 +352,6 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -400,7 +361,6 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -410,7 +370,6 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -420,7 +379,6 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -430,7 +388,6 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -441,7 +398,6 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -451,7 +407,6 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -462,7 +417,6 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -472,7 +426,6 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -483,7 +436,6 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -493,7 +445,6 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -504,7 +455,6 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -514,7 +464,6 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -525,7 +474,6 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -535,7 +483,6 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -546,7 +493,6 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -556,7 +502,6 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -567,7 +512,6 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -577,7 +521,6 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -588,7 +531,6 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -598,7 +540,6 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -609,7 +550,6 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
   return vnclipu(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
index 99d7645074c50..0860a036a24f0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8 (vint16mf4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4 (vint16mf2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2 (vint16m1_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vncvt_x_x_w_i8m1 (vint16m2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vncvt_x_x_w_i8m2 (vint16m4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vncvt_x_x_w_i8m4 (vint16m8_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8 (vuint16mf4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4 (vuint16mf2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2 (vuint16m1_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m1_t test_vncvt_x_x_w_u8m1 (vuint16m2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m2_t test_vncvt_x_x_w_u8m2 (vuint16m4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vncvt_x_x_w_u8m4 (vuint16m8_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4 (vint32mf2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2 (vint32m1_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16m1_t test_vncvt_x_x_w_i16m1 (vint32m2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16m2_t test_vncvt_x_x_w_i16m2 (vint32m4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m4_t test_vncvt_x_x_w_i16m4 (vint32m8_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4 (vuint32mf2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2 (vuint32m1_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint16m1_t test_vncvt_x_x_w_u16m1 (vuint32m2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint16m2_t test_vncvt_x_x_w_u16m2 (vuint32m4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint16m4_t test_vncvt_x_x_w_u16m4 (vuint32m8_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2 (vint64m1_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint32m1_t test_vncvt_x_x_w_i32m1 (vint64m2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32m2_t test_vncvt_x_x_w_i32m2 (vint64m4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32m4_t test_vncvt_x_x_w_i32m4 (vint64m8_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2 (vuint64m1_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint32m1_t test_vncvt_x_x_w_u32m1 (vuint64m2_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2 (vuint64m4_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
index 7bf8609efd502..896a506e38565 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
   return vneg(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vneg_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
index 1a4a68b633d04..b97e838ff1ec1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vnmsac(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
   return vnmsac(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
index 2e6c5e3ef841e..ee2803020f0b0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
   return vnmsub(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -904,7 +814,6 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -914,7 +823,6 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -924,7 +832,6 @@ vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -934,7 +841,6 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +850,6 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -954,7 +859,6 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +868,6 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -974,7 +877,6 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -984,7 +886,6 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -994,7 +895,6 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +904,6 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1014,7 +913,6 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +922,6 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1034,7 +931,6 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1044,7 +940,6 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1054,7 +949,6 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1064,7 +958,6 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1074,7 +967,6 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +976,6 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1094,7 +985,6 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +994,6 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1003,6 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1124,7 +1012,6 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1134,7 +1021,6 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1030,6 @@ vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1154,7 +1039,6 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1164,7 +1048,6 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1174,7 +1057,6 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1184,7 +1066,6 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1194,7 +1075,6 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1084,6 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1093,6 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1224,7 +1102,6 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1234,7 +1111,6 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1244,7 +1120,6 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1129,6 @@ vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1264,7 +1138,6 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1274,7 +1147,6 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1284,7 +1156,6 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1294,7 +1165,6 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1304,7 +1174,6 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1183,6 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1324,7 +1192,6 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1334,7 +1201,6 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1344,7 +1210,6 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1354,7 +1219,6 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1228,6 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1374,7 +1237,6 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1384,7 +1246,6 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1394,7 +1255,6 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1404,7 +1264,6 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1414,7 +1273,6 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1282,6 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1434,7 +1291,6 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1444,7 +1300,6 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1454,7 +1309,6 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1464,7 +1318,6 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1327,6 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1484,7 +1336,6 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1494,7 +1345,6 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1504,7 +1354,6 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1514,7 +1363,6 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1524,7 +1372,6 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1534,7 +1381,6 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1544,7 +1390,6 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1554,7 +1399,6 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1564,7 +1408,6 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1574,7 +1417,6 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1426,6 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1594,7 +1435,6 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1604,7 +1444,6 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1453,6 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1624,7 +1462,6 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1634,7 +1471,6 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1644,7 +1480,6 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1654,7 +1489,6 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1664,7 +1498,6 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1674,7 +1507,6 @@ vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1516,6 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1694,7 +1525,6 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1704,7 +1534,6 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1714,7 +1543,6 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1724,7 +1552,6 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1734,7 +1561,6 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1744,7 +1570,6 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1754,7 +1579,6 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
   return vnmsub(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
index a558e9c05af4a..704ce355ef43a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 -1, i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 -1, i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -385,7 +347,6 @@ vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -395,7 +356,6 @@ vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
@@ -405,7 +365,6 @@ vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -415,7 +374,6 @@ vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -425,7 +383,6 @@ vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])
@@ -435,7 +392,6 @@ vuint64m4_t test_vnot_v_u64m4 (vuint64m4_t op1, size_t vl) {
   return vnot(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnot_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 -1, i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
index 20278cac49e8f..7cc98b72a6f1f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl)
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl)
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl)
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
   return vnsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
index dffe4dba0289f..3919ac75d09da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl)
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl)
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
index cb43f77df9ab0..bcb03f0004c30 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
index 4a7d9cb4a2ffd..2e3d92c93aba0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
   return vpopc(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
   return vpopc(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
   return vpopc(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
   return vpopc(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
   return vpopc(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
   return vpopc(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
   return vpopc(mask, op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
index 6aa7874eb761b..e1897f20e4245 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredand(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredand(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
index ed1ae5045732f..0796133bc67eb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredmax(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredmaxu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredmax(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredmaxu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
index bdf2205493dae..23767a815f6fe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredmin(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredminu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredmin(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredminu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
index 60c303b6ae4e6..ad8c736225267 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
index a770eb9b0bfb9..96c3e938164e9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
index 9ad3e71aeaba8..247f67d69f018 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
   return vredxor(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -512,7 +466,6 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -524,7 +477,6 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -536,7 +488,6 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -548,7 +499,6 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -560,7 +510,6 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -572,7 +521,6 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -584,7 +532,6 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -596,7 +543,6 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -608,7 +554,6 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -620,7 +565,6 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +576,6 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -644,7 +587,6 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -656,7 +598,6 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +609,6 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +620,6 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +631,6 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +642,6 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +653,6 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +664,6 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +675,6 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +686,6 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +697,6 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +708,6 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +719,6 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +730,6 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +741,6 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +752,6 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +763,6 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +774,6 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +785,6 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +796,6 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +807,6 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -896,7 +818,6 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -908,7 +829,6 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -920,7 +840,6 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -932,7 +851,6 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -944,7 +862,6 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -956,7 +873,6 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -968,7 +884,6 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -980,7 +895,6 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -992,7 +906,6 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1004,7 +917,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vredxor(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
index 6b47207478286..2078823e24e51 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vrem(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vremu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
index 0186f689b8b92..0796155bc68da 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -78,7 +71,6 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -98,7 +89,6 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -108,7 +98,6 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -118,7 +107,6 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -138,7 +125,6 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -148,7 +134,6 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -159,7 +144,6 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -169,7 +153,6 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -180,7 +163,6 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -190,7 +172,6 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -201,7 +182,6 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -211,7 +191,6 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -222,7 +201,6 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -232,7 +210,6 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -243,7 +220,6 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -253,7 +229,6 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -264,7 +239,6 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -274,7 +248,6 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -285,7 +258,6 @@ vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -295,7 +267,6 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -306,7 +277,6 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -316,7 +286,6 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -327,7 +296,6 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -337,7 +305,6 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -348,7 +315,6 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -358,7 +324,6 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -369,7 +334,6 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -379,7 +343,6 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -390,7 +353,6 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -400,7 +362,6 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -411,7 +372,6 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -421,7 +381,6 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -432,7 +391,6 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -442,7 +400,6 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -453,7 +410,6 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -463,7 +419,6 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -474,7 +429,6 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -484,7 +438,6 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -495,7 +448,6 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -505,7 +457,6 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -516,7 +467,6 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -526,7 +476,6 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -536,7 +485,6 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -546,7 +494,6 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -556,7 +503,6 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -566,7 +512,6 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -576,7 +521,6 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -586,7 +530,6 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -596,7 +539,6 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -606,7 +548,6 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -617,7 +558,6 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -628,7 +568,6 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -639,7 +578,6 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -650,7 +588,6 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -661,7 +598,6 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -671,7 +607,6 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -682,7 +617,6 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -692,7 +626,6 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -703,7 +636,6 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -713,7 +645,6 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -724,7 +655,6 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -734,7 +664,6 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -745,7 +674,6 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -756,7 +684,6 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -767,7 +694,6 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -777,7 +703,6 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -788,7 +713,6 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -798,7 +722,6 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -809,7 +732,6 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -819,7 +741,6 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -830,7 +751,6 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -840,7 +760,6 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -851,7 +770,6 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -861,7 +779,6 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -872,7 +789,6 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -882,7 +798,6 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -893,7 +808,6 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -903,7 +817,6 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -914,7 +827,6 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -924,7 +836,6 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -935,7 +846,6 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -946,7 +856,6 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -957,7 +866,6 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -967,7 +875,6 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -978,7 +885,6 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -988,7 +894,6 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -999,7 +904,6 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +913,6 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1020,7 +923,6 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1030,7 +932,6 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +942,6 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1051,7 +951,6 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1062,7 +961,6 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +970,6 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1083,7 +980,6 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1093,7 +989,6 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +999,6 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
@@ -1114,7 +1008,6 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
   return vrgather(op1, index, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1125,7 +1018,6 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1136,7 +1028,6 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1147,7 +1038,6 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1157,7 +1047,6 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1167,7 +1056,6 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1177,7 +1065,6 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1188,7 +1075,6 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1199,7 +1085,6 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1210,7 +1095,6 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1221,7 +1105,6 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1232,7 +1115,6 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1243,7 +1125,6 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1254,7 +1135,6 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1265,7 +1145,6 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1276,7 +1155,6 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1287,7 +1165,6 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1298,7 +1175,6 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1309,7 +1185,6 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1320,7 +1195,6 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1331,7 +1205,6 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1342,7 +1215,6 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1353,7 +1225,6 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1364,7 +1235,6 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1375,7 +1245,6 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1386,7 +1255,6 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1397,7 +1265,6 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1408,7 +1275,6 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1419,7 +1285,6 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1430,7 +1295,6 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1441,7 +1305,6 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1452,7 +1315,6 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1463,7 +1325,6 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1474,7 +1335,6 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1485,7 +1345,6 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1496,7 +1355,6 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1507,7 +1365,6 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1518,7 +1375,6 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1529,7 +1385,6 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1540,7 +1395,6 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1551,7 +1405,6 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1562,7 +1415,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1573,7 +1425,6 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1584,7 +1435,6 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1595,7 +1445,6 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1606,7 +1455,6 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1617,7 +1465,6 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1628,7 +1475,6 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1639,7 +1485,6 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1650,7 +1495,6 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1661,7 +1505,6 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
   return vrgatherei16(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
index e9c97ad2726f9..6ec7bd9da966f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index f1254d9b9bddc..e69ecbc72ecbf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vsadd(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
index a0c54d0f6faeb..5c90af66cb0cd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -499,7 +454,6 @@ vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -510,7 +464,6 @@ vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -521,7 +474,6 @@ vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -532,7 +484,6 @@ vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -543,7 +494,6 @@ vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -554,7 +504,6 @@ vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -565,7 +514,6 @@ vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -576,7 +524,6 @@ vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -587,7 +534,6 @@ vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -598,7 +544,6 @@ vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -609,7 +554,6 @@ vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -620,7 +564,6 @@ vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -631,7 +574,6 @@ vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -642,7 +584,6 @@ vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -653,7 +594,6 @@ vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -664,7 +604,6 @@ vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -675,7 +614,6 @@ vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -686,7 +624,6 @@ vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -697,7 +634,6 @@ vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -708,7 +644,6 @@ vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -719,7 +654,6 @@ vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -730,7 +664,6 @@ vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -741,7 +674,6 @@ vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -752,7 +684,6 @@ vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -763,7 +694,6 @@ vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -774,7 +704,6 @@ vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -796,7 +724,6 @@ vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -807,7 +734,6 @@ vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -818,7 +744,6 @@ vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -829,7 +754,6 @@ vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -840,7 +764,6 @@ vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -851,7 +774,6 @@ vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -862,7 +784,6 @@ vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -873,7 +794,6 @@ vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -884,7 +804,6 @@ vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -895,7 +814,6 @@ vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -906,7 +824,6 @@ vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -917,7 +834,6 @@ vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -928,7 +844,6 @@ vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -939,7 +854,6 @@ vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -950,7 +864,6 @@ vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
@@ -961,7 +874,6 @@ vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
   return vsbc(op1, op2, borrowin, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
index 0cae4f5ea1546..b68f84208feef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
   return vsext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
   return vsext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
   return vsext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
   return vsext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
   return vsext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
index b4c435bba19da..cd6a17cd1d528 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -116,7 +105,6 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -126,7 +114,6 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -136,7 +123,6 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -147,7 +133,6 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -157,7 +142,6 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -167,7 +151,6 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -177,7 +160,6 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -187,7 +169,6 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -197,7 +178,6 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -207,7 +187,6 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -217,7 +196,6 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -227,7 +205,6 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -238,7 +215,6 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -249,7 +225,6 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -260,7 +235,6 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -270,7 +244,6 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -280,7 +253,6 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -290,7 +262,6 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -300,7 +271,6 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -311,7 +281,6 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -322,7 +291,6 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -333,7 +301,6 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -344,7 +311,6 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -355,7 +321,6 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -366,7 +331,6 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -377,7 +341,6 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -388,7 +351,6 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -399,7 +361,6 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -410,7 +371,6 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -421,7 +381,6 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -432,7 +391,6 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -443,7 +401,6 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -454,7 +411,6 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
   return vslide1down(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
index 2014f217f1fd6..af7c351bf2e04 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -96,7 +87,6 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -116,7 +105,6 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -126,7 +114,6 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -136,7 +123,6 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -147,7 +133,6 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -157,7 +142,6 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -167,7 +151,6 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -177,7 +160,6 @@ vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -187,7 +169,6 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -197,7 +178,6 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -207,7 +187,6 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -217,7 +196,6 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -227,7 +205,6 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -237,7 +214,6 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -247,7 +223,6 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -257,7 +232,6 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -267,7 +241,6 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -308,7 +278,6 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -319,7 +288,6 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -330,7 +298,6 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -341,7 +308,6 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -352,7 +318,6 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -363,7 +328,6 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -374,7 +338,6 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -385,7 +348,6 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -396,7 +358,6 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -407,7 +368,6 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -418,7 +378,6 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -429,7 +388,6 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -440,7 +398,6 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -451,7 +408,6 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
   return vslide1up(src, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
index b5e0603ba3ff3..01088ee621947 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -214,7 +195,6 @@ vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -225,7 +205,6 @@ vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -236,7 +215,6 @@ vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -247,7 +225,6 @@ vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -258,7 +235,6 @@ vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -269,7 +245,6 @@ vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -280,7 +255,6 @@ vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -291,7 +265,6 @@ vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -302,7 +275,6 @@ vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -313,7 +285,6 @@ vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -324,7 +295,6 @@ vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -335,7 +305,6 @@ vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -357,7 +325,6 @@ vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -368,7 +335,6 @@ vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -379,7 +345,6 @@ vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -401,7 +365,6 @@ vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -423,7 +385,6 @@ vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -434,7 +395,6 @@ vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -445,7 +405,6 @@ vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -456,7 +415,6 @@ vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -467,7 +425,6 @@ vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -478,7 +435,6 @@ vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -489,7 +445,6 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -511,7 +465,6 @@ vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -522,7 +475,6 @@ vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -533,7 +485,6 @@ vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -544,7 +495,6 @@ vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -555,7 +505,6 @@ vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -566,7 +515,6 @@ vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -577,7 +525,6 @@ vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -588,7 +535,6 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
   return vslidedown(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -600,7 +546,6 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -612,7 +557,6 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -624,7 +568,6 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -635,7 +578,6 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -646,7 +588,6 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -657,7 +598,6 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -668,7 +608,6 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -680,7 +619,6 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -692,7 +630,6 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -704,7 +641,6 @@ vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -716,7 +652,6 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -728,7 +663,6 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -740,7 +674,6 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -752,7 +685,6 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -764,7 +696,6 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -776,7 +707,6 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -788,7 +718,6 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +729,6 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -812,7 +740,6 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -824,7 +751,6 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -836,7 +762,6 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -848,7 +773,6 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -860,7 +784,6 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -872,7 +795,6 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -884,7 +806,6 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -895,7 +816,6 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -906,7 +826,6 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -917,7 +836,6 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -928,7 +846,6 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -940,7 +857,6 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -952,7 +868,6 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -964,7 +879,6 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -976,7 +890,6 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -988,7 +901,6 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1000,7 +912,6 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1012,7 +923,6 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1024,7 +934,6 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1036,7 +945,6 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1048,7 +956,6 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1060,7 +967,6 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +978,6 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1084,7 +989,6 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1096,7 +1000,6 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1108,7 +1011,6 @@ vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1120,7 +1022,6 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1033,6 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1144,7 +1044,6 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1156,7 +1055,6 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1168,7 +1066,6 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1180,7 +1077,6 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1088,6 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1204,7 +1099,6 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
   return vslidedown(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
index 0dcc7399817d3..93de0f4c364dc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
@@ -5,7 +5,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -115,7 +105,6 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -126,7 +115,6 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -137,7 +125,6 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -148,7 +135,6 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -159,7 +145,6 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -170,7 +155,6 @@ vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -181,7 +165,6 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -192,7 +175,6 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -203,7 +185,6 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -214,7 +195,6 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -225,7 +205,6 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -236,7 +215,6 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -247,7 +225,6 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -258,7 +235,6 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -269,7 +245,6 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -280,7 +255,6 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -291,7 +265,6 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -302,7 +275,6 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -313,7 +285,6 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -324,7 +295,6 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -335,7 +305,6 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -346,7 +315,6 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -357,7 +325,6 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -368,7 +335,6 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -379,7 +345,6 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -401,7 +365,6 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -423,7 +385,6 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -434,7 +395,6 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -445,7 +405,6 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -456,7 +415,6 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -467,7 +425,6 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -478,7 +435,6 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -489,7 +445,6 @@ vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -500,7 +455,6 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -511,7 +465,6 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -522,7 +475,6 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -533,7 +485,6 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -544,7 +495,6 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -555,7 +505,6 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -566,7 +515,6 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -577,7 +525,6 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
@@ -588,7 +535,6 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
   return vslideup(dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -599,7 +545,6 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -610,7 +555,6 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -621,7 +565,6 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -632,7 +575,6 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -643,7 +585,6 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -654,7 +595,6 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -665,7 +605,6 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -677,7 +616,6 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -689,7 +627,6 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -700,7 +637,6 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -711,7 +647,6 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -722,7 +657,6 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -733,7 +667,6 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -745,7 +678,6 @@ vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -756,7 +688,6 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -767,7 +698,6 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -778,7 +708,6 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -789,7 +718,6 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -800,7 +728,6 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -811,7 +738,6 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -822,7 +748,6 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -833,7 +758,6 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -845,7 +769,6 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -857,7 +780,6 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -869,7 +791,6 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -880,7 +801,6 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -891,7 +811,6 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -902,7 +821,6 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -913,7 +831,6 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -925,7 +842,6 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -937,7 +853,6 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -949,7 +864,6 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -961,7 +875,6 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -973,7 +886,6 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -985,7 +897,6 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -997,7 +908,6 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1009,7 +919,6 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1021,7 +930,6 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1033,7 +941,6 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1045,7 +952,6 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1057,7 +963,6 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1069,7 +974,6 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1081,7 +985,6 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1093,7 +996,6 @@ vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1105,7 +1007,6 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1117,7 +1018,6 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1129,7 +1029,6 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1141,7 +1040,6 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1153,7 +1051,6 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1165,7 +1062,6 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1177,7 +1073,6 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1189,7 +1084,6 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
   return vslideup(mask, dst, src, offset, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
index fd129f2fa2786..ecdd3b511eb60 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
index a1ebb87611a50..feaaf99c431e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
index 48e112d0c0b97..fffedf030bf59 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl)
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl)
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl)
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
index 57d0b89693a14..5e255b148326b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
index 3970f86a2a48a..ef6712e2490d1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vssra(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
index e947b25b8fbc8..7f4f2fe04a5e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -176,7 +159,6 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -186,7 +168,6 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -196,7 +177,6 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -206,7 +186,6 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -216,7 +195,6 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -226,7 +204,6 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -236,7 +213,6 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -246,7 +222,6 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -256,7 +231,6 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -266,7 +240,6 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -277,7 +250,6 @@ vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -287,7 +259,6 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -297,7 +268,6 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -307,7 +277,6 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -317,7 +286,6 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -327,7 +295,6 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -337,7 +304,6 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -347,7 +313,6 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -357,7 +322,6 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -367,7 +331,6 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -377,7 +340,6 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -387,7 +349,6 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -397,7 +358,6 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -407,7 +367,6 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -417,7 +376,6 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -427,7 +385,6 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
@@ -437,7 +394,6 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vssrl(op1, shift, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
index 1bf7a6f6d10f1..4e9a53dd3ef2e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vssub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -595,7 +536,6 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -605,7 +545,6 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -616,7 +555,6 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -626,7 +564,6 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -656,7 +591,6 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -666,7 +600,6 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -676,7 +609,6 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +618,6 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -696,7 +627,6 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -706,7 +636,6 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -717,7 +646,6 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -727,7 +655,6 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -737,7 +664,6 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -747,7 +673,6 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -757,7 +682,6 @@ vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -767,7 +691,6 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -777,7 +700,6 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -787,7 +709,6 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -797,7 +718,6 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +727,6 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -817,7 +736,6 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -827,7 +745,6 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -837,7 +754,6 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -847,7 +763,6 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -857,7 +772,6 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -867,7 +781,6 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -877,7 +790,6 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vssubu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
index 3df67c8e37de4..3687437769ffe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
index af01a772e7b6c..f9bea4ec1b8fd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vwadd_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vwadd_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
   return vwadd_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
   return vwadd_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -657,7 +592,6 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -667,7 +601,6 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -678,7 +611,6 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -688,7 +620,6 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -698,7 +629,6 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -708,7 +638,6 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -718,7 +647,6 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -728,7 +656,6 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -738,7 +665,6 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -748,7 +674,6 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -758,7 +683,6 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -768,7 +692,6 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -778,7 +701,6 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -788,7 +710,6 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -798,7 +719,6 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -808,7 +728,6 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -818,7 +737,6 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -828,7 +746,6 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -838,7 +755,6 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +764,6 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -859,7 +774,6 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +783,6 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -911,7 +821,6 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +830,6 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -941,7 +848,6 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -951,7 +857,6 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -961,7 +866,6 @@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -971,7 +875,6 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -981,7 +884,6 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -991,7 +893,6 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1001,7 +902,6 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +911,6 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1021,7 +920,6 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +929,6 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +938,6 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1051,7 +947,6 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1062,7 +957,6 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +966,6 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1082,7 +975,6 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1092,7 +984,6 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1102,7 +993,6 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1002,6 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1122,7 +1011,6 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1020,6 @@ vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1029,6 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1038,6 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1162,7 +1047,6 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1172,7 +1056,6 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
   return vwaddu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1182,7 +1065,6 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwaddu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1074,6 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwaddu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1202,7 +1083,6 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
   return vwaddu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
index 94df022067036..cb824092e14fc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
   return vwcvt_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
   return vwcvtu_x(src, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
index ebf63d8fd0de2..1c245443da00c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
   return vwmacc(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -411,7 +374,6 @@ vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -422,7 +384,6 @@ vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -433,7 +394,6 @@ vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +404,6 @@ vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -455,7 +414,6 @@ vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -466,7 +424,6 @@ vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -477,7 +434,6 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -488,7 +444,6 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -499,7 +454,6 @@ vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -510,7 +464,6 @@ vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -521,7 +474,6 @@ vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -532,7 +484,6 @@ vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -543,7 +494,6 @@ vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +504,6 @@ vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -565,7 +514,6 @@ vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -576,7 +524,6 @@ vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -587,7 +534,6 @@ vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -598,7 +544,6 @@ vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -609,7 +554,6 @@ vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -620,7 +564,6 @@ vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -631,7 +574,6 @@ vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -642,7 +584,6 @@ vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -653,7 +594,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +604,6 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
   return vwmaccu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -675,7 +614,6 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -686,7 +624,6 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -697,7 +634,6 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -708,7 +644,6 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -719,7 +654,6 @@ vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -730,7 +664,6 @@ vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -741,7 +674,6 @@ vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -752,7 +684,6 @@ vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -763,7 +694,6 @@ vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +704,6 @@ vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -785,7 +714,6 @@ vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -796,7 +724,6 @@ vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -807,7 +734,6 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -818,7 +744,6 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -829,7 +754,6 @@ vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -840,7 +764,6 @@ vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -851,7 +774,6 @@ vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -862,7 +784,6 @@ vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -873,7 +794,6 @@ vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +804,6 @@ vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -895,7 +814,6 @@ vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -906,7 +824,6 @@ vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -917,7 +834,6 @@ vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -928,7 +844,6 @@ vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -939,7 +854,6 @@ vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -950,7 +864,6 @@ vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -961,7 +874,6 @@ vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -972,7 +884,6 @@ vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -983,7 +894,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -994,7 +904,6 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
   return vwmaccsu(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1005,7 +914,6 @@ vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1016,7 +924,6 @@ vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1027,7 +934,6 @@ vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1038,7 +944,6 @@ vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1049,7 +954,6 @@ vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1060,7 +964,6 @@ vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1071,7 +974,6 @@ vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1082,7 +984,6 @@ vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1093,7 +994,6 @@ vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1104,7 +1004,6 @@ vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1115,7 +1014,6 @@ vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1126,7 +1024,6 @@ vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1137,7 +1034,6 @@ vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1148,7 +1044,6 @@ vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1159,7 +1054,6 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
   return vwmaccus(acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1170,7 +1064,6 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1181,7 +1074,6 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1084,6 @@ vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1203,7 +1094,6 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1214,7 +1104,6 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1225,7 +1114,6 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1236,7 +1124,6 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1247,7 +1134,6 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1258,7 +1144,6 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1269,7 +1154,6 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1280,7 +1164,6 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1291,7 +1174,6 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1303,7 +1185,6 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1314,7 +1195,6 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1325,7 +1205,6 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1336,7 +1215,6 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1347,7 +1225,6 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1358,7 +1235,6 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1369,7 +1245,6 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1380,7 +1255,6 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1391,7 +1265,6 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1402,7 +1275,6 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1413,7 +1285,6 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1424,7 +1295,6 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1435,7 +1305,6 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1446,7 +1315,6 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1457,7 +1325,6 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1468,7 +1335,6 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1479,7 +1345,6 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1490,7 +1355,6 @@ vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
   return vwmacc(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1502,7 +1366,6 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1513,7 +1376,6 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1525,7 +1387,6 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1536,7 +1397,6 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1548,7 +1408,6 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1559,7 +1418,6 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1570,7 +1428,6 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1581,7 +1438,6 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1592,7 +1448,6 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1603,7 +1458,6 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1614,7 +1468,6 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1625,7 +1478,6 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1637,7 +1489,6 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1649,7 +1500,6 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1661,7 +1511,6 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1672,7 +1521,6 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1684,7 +1532,6 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1695,7 +1542,6 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1707,7 +1553,6 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1718,7 +1563,6 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1730,7 +1574,6 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1741,7 +1584,6 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1753,7 +1595,6 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1764,7 +1605,6 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1776,7 +1616,6 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1787,7 +1626,6 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1799,7 +1637,6 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1810,7 +1647,6 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1822,7 +1658,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1833,7 +1668,6 @@ vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
   return vwmaccu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1845,7 +1679,6 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1856,7 +1689,6 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1868,7 +1700,6 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1879,7 +1710,6 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1891,7 +1721,6 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1902,7 +1731,6 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1913,7 +1741,6 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1924,7 +1751,6 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1935,7 +1761,6 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1946,7 +1771,6 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1957,7 +1781,6 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1968,7 +1791,6 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1980,7 +1802,6 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -1992,7 +1813,6 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2004,7 +1824,6 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2015,7 +1834,6 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2027,7 +1845,6 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2038,7 +1855,6 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2050,7 +1866,6 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2061,7 +1876,6 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2073,7 +1887,6 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2084,7 +1897,6 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2096,7 +1908,6 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2107,7 +1918,6 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2119,7 +1929,6 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2130,7 +1939,6 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2142,7 +1950,6 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2153,7 +1960,6 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2165,7 +1971,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2176,7 +1981,6 @@ vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
   return vwmaccsu(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2187,7 +1991,6 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2198,7 +2001,6 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2209,7 +2011,6 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2220,7 +2021,6 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2231,7 +2031,6 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2242,7 +2041,6 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2254,7 +2052,6 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2265,7 +2062,6 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2276,7 +2072,6 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2287,7 +2082,6 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2298,7 +2092,6 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2309,7 +2102,6 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2320,7 +2112,6 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -2331,7 +2122,6 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
   return vwmaccus(mask, acc, op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
index e07ef43e8f559..11a07957bfdd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vwmul(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl)
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl)
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwmulu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -884,7 +796,6 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -894,7 +805,6 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwmulsu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
index 718f6889d5d29..f06985ca8bc88 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector,
   return vwredsum(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -246,7 +224,6 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -257,7 +234,6 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -268,7 +244,6 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -279,7 +254,6 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -290,7 +264,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -301,7 +274,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -312,7 +284,6 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -323,7 +294,6 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -334,7 +304,6 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -345,7 +314,6 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -356,7 +324,6 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -389,7 +354,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -400,7 +364,6 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
   return vwredsumu(dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -412,7 +375,6 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -424,7 +386,6 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -436,7 +397,6 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -448,7 +408,6 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -460,7 +419,6 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -472,7 +430,6 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -484,7 +441,6 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -496,7 +452,6 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -508,7 +463,6 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -520,7 +474,6 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -532,7 +485,6 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -544,7 +496,6 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -556,7 +507,6 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -568,7 +518,6 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -580,7 +529,6 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -592,7 +540,6 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -604,7 +551,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -616,7 +562,6 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
   return vwredsum(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -628,7 +573,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -640,7 +584,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -652,7 +595,6 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -664,7 +606,6 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -676,7 +617,6 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -688,7 +628,6 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -700,7 +639,6 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -712,7 +650,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -724,7 +661,6 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -736,7 +672,6 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -748,7 +683,6 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -760,7 +694,6 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -772,7 +705,6 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -784,7 +716,6 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -796,7 +727,6 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -808,7 +738,6 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -820,7 +749,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
   return vwredsumu(mask, dst, vector, scalar, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
index eb883d3400471..6bfe9df38cf33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vwsub_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
   return vwsub_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
   return vwsub_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
   return vwsub_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -615,7 +554,6 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -625,7 +563,6 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -636,7 +573,6 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -646,7 +582,6 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -657,7 +592,6 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -667,7 +601,6 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -678,7 +611,6 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -688,7 +620,6 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -698,7 +629,6 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -708,7 +638,6 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -718,7 +647,6 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -728,7 +656,6 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -738,7 +665,6 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -748,7 +674,6 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -758,7 +683,6 @@ vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -768,7 +692,6 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -778,7 +701,6 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -788,7 +710,6 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -798,7 +719,6 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -808,7 +728,6 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -818,7 +737,6 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -828,7 +746,6 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -838,7 +755,6 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -848,7 +764,6 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -859,7 +774,6 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -869,7 +783,6 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -880,7 +793,6 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -890,7 +802,6 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -901,7 +812,6 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -911,7 +821,6 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -921,7 +830,6 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -931,7 +839,6 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -941,7 +848,6 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -951,7 +857,6 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -961,7 +866,6 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -971,7 +875,6 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -981,7 +884,6 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -991,7 +893,6 @@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1001,7 +902,6 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1011,7 +911,6 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1021,7 +920,6 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1031,7 +929,6 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1041,7 +938,6 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1051,7 +947,6 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1062,7 +957,6 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1072,7 +966,6 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1082,7 +975,6 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1092,7 +984,6 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1102,7 +993,6 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1112,7 +1002,6 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1122,7 +1011,6 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1132,7 +1020,6 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1142,7 +1029,6 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1152,7 +1038,6 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1162,7 +1047,6 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1172,7 +1056,6 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1182,7 +1065,6 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwsubu_vv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1192,7 +1074,6 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwsubu_vx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1202,7 +1083,6 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
   return vwsubu_wv(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
index 5dc40eff2a3ee..485c895fd7746 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -284,7 +256,6 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -294,7 +265,6 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -304,7 +274,6 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -314,7 +283,6 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -324,7 +292,6 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -334,7 +301,6 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -344,7 +310,6 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -354,7 +319,6 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -364,7 +328,6 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -374,7 +337,6 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -384,7 +346,6 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -394,7 +355,6 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -404,7 +364,6 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -414,7 +373,6 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -424,7 +382,6 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -434,7 +391,6 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -444,7 +400,6 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -454,7 +409,6 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -464,7 +418,6 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -474,7 +427,6 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -484,7 +436,6 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -494,7 +445,6 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -504,7 +454,6 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -514,7 +463,6 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -524,7 +472,6 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -534,7 +481,6 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -544,7 +490,6 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -554,7 +499,6 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -564,7 +508,6 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -574,7 +517,6 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -584,7 +526,6 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -594,7 +535,6 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl)
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -604,7 +544,6 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -614,7 +553,6 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl)
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -624,7 +562,6 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -634,7 +571,6 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -644,7 +580,6 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -654,7 +589,6 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -664,7 +598,6 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -674,7 +607,6 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -684,7 +616,6 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -694,7 +625,6 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -704,7 +634,6 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -714,7 +643,6 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl)
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -724,7 +652,6 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -734,7 +661,6 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -744,7 +670,6 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -754,7 +679,6 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -764,7 +688,6 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -774,7 +697,6 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -784,7 +706,6 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -794,7 +715,6 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -804,7 +724,6 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -814,7 +733,6 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -824,7 +742,6 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -834,7 +751,6 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -844,7 +760,6 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -854,7 +769,6 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
@@ -864,7 +778,6 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -874,7 +787,6 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vxor(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
index 0d46b81e085ea..f30e162841dd2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
@@ -4,7 +4,6 @@
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
   return vzext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
   return vzext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -144,7 +130,6 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
   return vzext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -154,7 +139,6 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
   return vzext_vf8(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -164,7 +148,6 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -174,7 +157,6 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -184,7 +166,6 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -194,7 +175,6 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -204,7 +184,6 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -214,7 +193,6 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -224,7 +202,6 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -234,7 +211,6 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -244,7 +220,6 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
   return vzext_vf4(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -254,7 +229,6 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -264,7 +238,6 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -274,7 +247,6 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
   return vzext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])